author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/alpha
download   linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.xz
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/Kconfig  606
-rw-r--r--  arch/alpha/Kconfig.debug  59
-rw-r--r--  arch/alpha/Makefile  130
-rw-r--r--  arch/alpha/boot/Makefile  116
-rw-r--r--  arch/alpha/boot/bootloader.lds  24
-rw-r--r--  arch/alpha/boot/bootp.c  214
-rw-r--r--  arch/alpha/boot/bootpz.c  469
-rw-r--r--  arch/alpha/boot/head.S  123
-rw-r--r--  arch/alpha/boot/main.c  191
-rw-r--r--  arch/alpha/boot/misc.c  207
-rw-r--r--  arch/alpha/boot/tools/mkbb.c  151
-rw-r--r--  arch/alpha/boot/tools/objstrip.c  281
-rw-r--r--  arch/alpha/defconfig  927
-rw-r--r--  arch/alpha/kernel/Makefile  104
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c  235
-rw-r--r--  arch/alpha/kernel/asm-offsets.c  43
-rw-r--r--  arch/alpha/kernel/console.c  66
-rw-r--r--  arch/alpha/kernel/core_apecs.c  418
-rw-r--r--  arch/alpha/kernel/core_cia.c  1212
-rw-r--r--  arch/alpha/kernel/core_irongate.c  416
-rw-r--r--  arch/alpha/kernel/core_lca.c  515
-rw-r--r--  arch/alpha/kernel/core_marvel.c  1154
-rw-r--r--  arch/alpha/kernel/core_mcpcia.c  618
-rw-r--r--  arch/alpha/kernel/core_polaris.c  203
-rw-r--r--  arch/alpha/kernel/core_t2.c  622
-rw-r--r--  arch/alpha/kernel/core_titan.c  806
-rw-r--r--  arch/alpha/kernel/core_tsunami.c  459
-rw-r--r--  arch/alpha/kernel/core_wildfire.c  658
-rw-r--r--  arch/alpha/kernel/entry.S  957
-rw-r--r--  arch/alpha/kernel/err_common.c  321
-rw-r--r--  arch/alpha/kernel/err_ev6.c  274
-rw-r--r--  arch/alpha/kernel/err_ev7.c  289
-rw-r--r--  arch/alpha/kernel/err_impl.h  85
-rw-r--r--  arch/alpha/kernel/err_marvel.c  1159
-rw-r--r--  arch/alpha/kernel/err_titan.c  756
-rw-r--r--  arch/alpha/kernel/es1888.c  49
-rw-r--r--  arch/alpha/kernel/gct.c  48
-rw-r--r--  arch/alpha/kernel/head.S  99
-rw-r--r--  arch/alpha/kernel/init_task.c  23
-rw-r--r--  arch/alpha/kernel/io.c  630
-rw-r--r--  arch/alpha/kernel/irq.c  774
-rw-r--r--  arch/alpha/kernel/irq_alpha.c  252
-rw-r--r--  arch/alpha/kernel/irq_i8259.c  183
-rw-r--r--  arch/alpha/kernel/irq_impl.h  42
-rw-r--r--  arch/alpha/kernel/irq_pyxis.c  127
-rw-r--r--  arch/alpha/kernel/irq_srm.c  79
-rw-r--r--  arch/alpha/kernel/machvec_impl.h  150
-rw-r--r--  arch/alpha/kernel/module.c  311
-rw-r--r--  arch/alpha/kernel/ns87312.c  38
-rw-r--r--  arch/alpha/kernel/osf_sys.c  1345
-rw-r--r--  arch/alpha/kernel/pci-noop.c  214
-rw-r--r--  arch/alpha/kernel/pci.c  561
-rw-r--r--  arch/alpha/kernel/pci_impl.h  209
-rw-r--r--  arch/alpha/kernel/pci_iommu.c  971
-rw-r--r--  arch/alpha/kernel/process.c  528
-rw-r--r--  arch/alpha/kernel/proto.h  210
-rw-r--r--  arch/alpha/kernel/ptrace.c  415
-rw-r--r--  arch/alpha/kernel/semaphore.c  224
-rw-r--r--  arch/alpha/kernel/setup.c  1486
-rw-r--r--  arch/alpha/kernel/signal.c  672
-rw-r--r--  arch/alpha/kernel/smc37c669.c  2554
-rw-r--r--  arch/alpha/kernel/smc37c93x.c  277
-rw-r--r--  arch/alpha/kernel/smp.c  1163
-rw-r--r--  arch/alpha/kernel/srm_env.c  335
-rw-r--r--  arch/alpha/kernel/srmcons.c  326
-rw-r--r--  arch/alpha/kernel/sys_alcor.c  326
-rw-r--r--  arch/alpha/kernel/sys_cabriolet.c  448
-rw-r--r--  arch/alpha/kernel/sys_dp264.c  689
-rw-r--r--  arch/alpha/kernel/sys_eb64p.c  256
-rw-r--r--  arch/alpha/kernel/sys_eiger.c  242
-rw-r--r--  arch/alpha/kernel/sys_jensen.c  274
-rw-r--r--  arch/alpha/kernel/sys_marvel.c  499
-rw-r--r--  arch/alpha/kernel/sys_miata.c  289
-rw-r--r--  arch/alpha/kernel/sys_mikasa.c  265
-rw-r--r--  arch/alpha/kernel/sys_nautilus.c  269
-rw-r--r--  arch/alpha/kernel/sys_noritake.c  347
-rw-r--r--  arch/alpha/kernel/sys_rawhide.c  270
-rw-r--r--  arch/alpha/kernel/sys_ruffian.c  240
-rw-r--r--  arch/alpha/kernel/sys_rx164.c  220
-rw-r--r--  arch/alpha/kernel/sys_sable.c  653
-rw-r--r--  arch/alpha/kernel/sys_sio.c  438
-rw-r--r--  arch/alpha/kernel/sys_sx164.c  178
-rw-r--r--  arch/alpha/kernel/sys_takara.c  296
-rw-r--r--  arch/alpha/kernel/sys_titan.c  420
-rw-r--r--  arch/alpha/kernel/sys_wildfire.c  361
-rw-r--r--  arch/alpha/kernel/systbls.S  468
-rw-r--r--  arch/alpha/kernel/time.c  591
-rw-r--r--  arch/alpha/kernel/traps.c  1092
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S  149
-rw-r--r--  arch/alpha/lib/Makefile  58
-rw-r--r--  arch/alpha/lib/callback_srm.S  104
-rw-r--r--  arch/alpha/lib/checksum.c  186
-rw-r--r--  arch/alpha/lib/clear_page.S  39
-rw-r--r--  arch/alpha/lib/clear_user.S  113
-rw-r--r--  arch/alpha/lib/copy_page.S  49
-rw-r--r--  arch/alpha/lib/copy_user.S  145
-rw-r--r--  arch/alpha/lib/csum_ipv6_magic.S  92
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c  391
-rw-r--r--  arch/alpha/lib/dbg_current.S  29
-rw-r--r--  arch/alpha/lib/dbg_stackcheck.S  27
-rw-r--r--  arch/alpha/lib/dbg_stackkill.S  35
-rw-r--r--  arch/alpha/lib/dec_and_lock.c  42
-rw-r--r--  arch/alpha/lib/divide.S  195
-rw-r--r--  arch/alpha/lib/ev6-clear_page.S  54
-rw-r--r--  arch/alpha/lib/ev6-clear_user.S  225
-rw-r--r--  arch/alpha/lib/ev6-copy_page.S  203
-rw-r--r--  arch/alpha/lib/ev6-copy_user.S  259
-rw-r--r--  arch/alpha/lib/ev6-csum_ipv6_magic.S  126
-rw-r--r--  arch/alpha/lib/ev6-divide.S  259
-rw-r--r--  arch/alpha/lib/ev6-memchr.S  191
-rw-r--r--  arch/alpha/lib/ev6-memcpy.S  248
-rw-r--r--  arch/alpha/lib/ev6-memset.S  597
-rw-r--r--  arch/alpha/lib/ev6-strncpy_from_user.S  424
-rw-r--r--  arch/alpha/lib/ev6-stxcpy.S  321
-rw-r--r--  arch/alpha/lib/ev6-stxncpy.S  397
-rw-r--r--  arch/alpha/lib/ev67-strcat.S  54
-rw-r--r--  arch/alpha/lib/ev67-strchr.S  88
-rw-r--r--  arch/alpha/lib/ev67-strlen.S  49
-rw-r--r--  arch/alpha/lib/ev67-strlen_user.S  107
-rw-r--r--  arch/alpha/lib/ev67-strncat.S  94
-rw-r--r--  arch/alpha/lib/ev67-strrchr.S  109
-rw-r--r--  arch/alpha/lib/fpreg.c  193
-rw-r--r--  arch/alpha/lib/memchr.S  164
-rw-r--r--  arch/alpha/lib/memcpy.c  163
-rw-r--r--  arch/alpha/lib/memmove.S  181
-rw-r--r--  arch/alpha/lib/memset.S  124
-rw-r--r--  arch/alpha/lib/srm_printk.c  41
-rw-r--r--  arch/alpha/lib/srm_puts.c  23
-rw-r--r--  arch/alpha/lib/stacktrace.c  103
-rw-r--r--  arch/alpha/lib/strcasecmp.c  26
-rw-r--r--  arch/alpha/lib/strcat.S  52
-rw-r--r--  arch/alpha/lib/strchr.S  70
-rw-r--r--  arch/alpha/lib/strcpy.S  23
-rw-r--r--  arch/alpha/lib/strlen.S  57
-rw-r--r--  arch/alpha/lib/strlen_user.S  91
-rw-r--r--  arch/alpha/lib/strncat.S  84
-rw-r--r--  arch/alpha/lib/strncpy.S  81
-rw-r--r--  arch/alpha/lib/strncpy_from_user.S  339
-rw-r--r--  arch/alpha/lib/strrchr.S  87
-rw-r--r--  arch/alpha/lib/stxcpy.S  289
-rw-r--r--  arch/alpha/lib/stxncpy.S  345
-rw-r--r--  arch/alpha/lib/udelay.c  55
-rw-r--r--  arch/alpha/math-emu/Makefile  9
-rw-r--r--  arch/alpha/math-emu/math.c  400
-rw-r--r--  arch/alpha/math-emu/qrnnd.S  163
-rw-r--r--  arch/alpha/math-emu/sfp-util.h  35
-rw-r--r--  arch/alpha/mm/Makefile  9
-rw-r--r--  arch/alpha/mm/extable.c  34
-rw-r--r--  arch/alpha/mm/fault.c  247
-rw-r--r--  arch/alpha/mm/init.c  382
-rw-r--r--  arch/alpha/mm/numa.c  395
-rw-r--r--  arch/alpha/mm/remap.c  90
-rw-r--r--  arch/alpha/oprofile/Kconfig  23
-rw-r--r--  arch/alpha/oprofile/Makefile  19
-rw-r--r--  arch/alpha/oprofile/common.c  189
-rw-r--r--  arch/alpha/oprofile/op_impl.h  55
-rw-r--r--  arch/alpha/oprofile/op_model_ev4.c  116
-rw-r--r--  arch/alpha/oprofile/op_model_ev5.c  211
-rw-r--r--  arch/alpha/oprofile/op_model_ev6.c  103
-rw-r--r--  arch/alpha/oprofile/op_model_ev67.c  263
160 files changed, 49087 insertions, 0 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
new file mode 100644
index 000000000000..0c79b9d95f74
--- /dev/null
+++ b/arch/alpha/Kconfig
@@ -0,0 +1,606 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+config ALPHA
+ bool
+ default y
+ help
+ The Alpha is a 64-bit general-purpose processor designed and
+ marketed by the Digital Equipment Corporation of blessed memory,
+ now Hewlett-Packard. The Alpha Linux project has a home page at
+ <http://www.alphalinux.org/>.
+
+config 64BIT
+ def_bool y
+
+config MMU
+ bool
+ default y
+
+config UID16
+ bool
+
+config RWSEM_GENERIC_SPINLOCK
+ bool
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+ default y
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config GENERIC_ISA_DMA
+ bool
+ default y
+
+config GENERIC_IOMAP
+ bool
+ default n
+
+source "init/Kconfig"
+
+
+menu "System setup"
+
+choice
+ prompt "Alpha system type"
+ default ALPHA_GENERIC
+ ---help---
+ This is the system type of your hardware. A "generic" kernel will
+ run on any supported Alpha system. However, if you configure a
+ kernel for your specific system, it will be faster and smaller.
+
+ To find out what type of Alpha system you have, you may want to
+ check out the Linux/Alpha FAQ, accessible on the WWW from
+ <http://www.alphalinux.org/>. In summary:
+
+ Alcor/Alpha-XLT AS 600
+ Alpha-XL XL-233, XL-266
+ AlphaBook1 Alpha laptop
+ Avanti AS 200, AS 205, AS 250, AS 255, AS 300, AS 400
+ Cabriolet AlphaPC64, AlphaPCI64
+ DP264 DP264
+ EB164 EB164 21164 evaluation board
+ EB64+ EB64+ 21064 evaluation board
+ EB66 EB66 21066 evaluation board
+ EB66+ EB66+ 21066 evaluation board
+ Jensen DECpc 150, DEC 2000 model 300,
+ DEC 2000 model 500
+ LX164 AlphaPC164-LX
+ Lynx AS 2100A
+ Miata Personal Workstation 433a, 433au, 500a,
+ 500au, 600a, or 600au
+ Marvel AlphaServer ES47 / ES80 / GS1280
+ Mikasa AS 1000
+ Noname AXPpci33, UDB (Multia)
+ Noritake AS 1000A, AS 600A, AS 800
+ PC164 AlphaPC164
+ Rawhide AS 1200, AS 4000, AS 4100
+ Ruffian RPX164-2, AlphaPC164-UX, AlphaPC164-BX
+ SX164 AlphaPC164-SX
+ Sable AS 2000, AS 2100
+ Shark DS 20L
+ Takara Takara
+ Titan AlphaServer ES45 / DS25
+ Wildfire AlphaServer GS 40/80/160/320
+
+ If you don't know what to do, choose "generic".
+
+config ALPHA_GENERIC
+ bool "Generic"
+ help
+ A generic kernel will run on all supported Alpha hardware.
+
+config ALPHA_ALCOR
+ bool "Alcor/Alpha-XLT"
+ help
+ For systems using the Digital ALCOR chipset: 5 chips (4 64-bit data
+ slice chips (Data Switch, DSW) in 208-pin PQFPs, and 1 control chip
+ (Control, I/O Address, CIA) in a 383-pin plastic PGA). It provides
+ a DRAM controller (256-bit memory bus) and a PCI interface. It also
+ does all the work required to support an external Bcache and to
+ maintain memory coherence when a PCI device DMAs into (or out of)
+ memory.
+
+config ALPHA_XL
+ bool "Alpha-XL"
+ help
+ XL-233 and XL-266-based Alpha systems.
+
+config ALPHA_BOOK1
+ bool "AlphaBook1"
+ help
+ DEC AlphaBook1/Burns Alpha-based laptops.
+
+config ALPHA_AVANTI_CH
+ bool "Avanti"
+
+config ALPHA_CABRIOLET
+ bool "Cabriolet"
+ help
+ Cabriolet AlphaPC64, AlphaPCI64 systems. Derived from EB64+ but now
+ baby-AT with Flash boot ROM, no on-board SCSI or Ethernet. 3 ISA
+ slots, 4 PCI slots (one pair are on a shared slot), uses plug-in
+ Bcache SIMMs. Requires power supply with 3.3V output.
+
+config ALPHA_DP264
+ bool "DP264"
+ help
+ Various 21264 systems with the tsunami core logic chipset.
+ API Networks: 264DP, UP2000(+), CS20;
+ Compaq: DS10(E,L), XP900, XP1000, DS20(E), ES40.
+
+config ALPHA_EB164
+ bool "EB164"
+ help
+ EB164 21164 evaluation board from DEC. Uses 21164 and ALCOR. Has
+ ISA and PCI expansion (3 ISA slots, 2 64-bit PCI slots (one is
+ shared with an ISA slot) and 2 32-bit PCI slots. Uses plug-in
+ Bcache SIMMs. I/O sub-system provides SuperI/O (2S, 1P, FD), KBD,
+ MOUSE (PS2 style), RTC/NVRAM. Boot ROM is Flash. PC-AT-sized
+ motherboard. Requires power supply with 3.3V output.
+
+config ALPHA_EB64P_CH
+ bool "EB64+"
+
+config ALPHA_EB66
+ bool "EB66"
+ help
+ A Digital DS group board. Uses 21066 or 21066A. I/O sub-system is
+ identical to EB64+. Baby PC-AT size. Runs from standard PC power
+ supply. The EB66 schematic was published as a marketing poster
+ advertising the 21066 as "the first microprocessor in the world with
+ embedded PCI".
+
+config ALPHA_EB66P
+ bool "EB66+"
+ help
+ Later variant of the EB66 board.
+
+config ALPHA_EIGER
+ bool "Eiger"
+ help
+ Apparently an obscure OEM single-board computer based on the
+ Typhoon/Tsunami chipset family. Information on it is scanty.
+
+config ALPHA_JENSEN
+ bool "Jensen"
+ help
+ DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
+ of the first-generation Alpha systems. A number of these systems
+ seem to be available on the second-hand market. The Jensen is a
+ floor-standing tower system which originally used a 150MHz 21064.
+ It used programmable logic to interface a 486 EISA I/O bridge to
+ the CPU.
+
+config ALPHA_LX164
+ bool "LX164"
+ help
+ A technical overview of this board is available at
+ <http://www.unix-ag.org/Linux-Alpha/Architectures/LX164.html>.
+
+config ALPHA_LYNX
+ bool "Lynx"
+ help
+ AlphaServer 2100A-based systems.
+
+config ALPHA_MARVEL
+ bool "Marvel"
+ help
+ AlphaServer ES47 / ES80 / GS1280 based on EV7.
+
+config ALPHA_MIATA
+ bool "Miata"
+ help
+ The Digital PersonalWorkStation (PWS 433a, 433au, 500a, 500au, 600a,
+ or 600au). There is an Installation HOWTO for this hardware at
+ <http://eijk.homelinux.org/~stefan/miata.html>.
+
+config ALPHA_MIKASA
+ bool "Mikasa"
+ help
+ AlphaServer 1000-based Alpha systems.
+
+config ALPHA_NAUTILUS
+ bool "Nautilus"
+ help
+ Alpha systems based on the AMD 751 & ALI 1543C chipsets.
+
+config ALPHA_NONAME_CH
+ bool "Noname"
+
+config ALPHA_NORITAKE
+ bool "Noritake"
+ help
+ AlphaServer 1000A, AlphaServer 600A, and AlphaServer 800-based
+ systems.
+
+config ALPHA_PC164
+ bool "PC164"
+
+config ALPHA_P2K
+ bool "Platform2000"
+
+config ALPHA_RAWHIDE
+ bool "Rawhide"
+ help
+ AlphaServer 1200, AlphaServer 4000 and AlphaServer 4100 machines.
+ See HOWTO at
+ <http://www.alphalinux.org/docs/rawhide/4100_install.shtml>.
+
+config ALPHA_RUFFIAN
+ bool "Ruffian"
+ help
+ Samsung APC164UX. There is a page on known problems and workarounds
+ at <http://www.alphalinux.org/faq/FAQ-11.html>.
+
+config ALPHA_RX164
+ bool "RX164"
+
+config ALPHA_SX164
+ bool "SX164"
+
+config ALPHA_SABLE
+ bool "Sable"
+ help
+ Digital AlphaServer 2000 and 2100-based systems.
+
+config ALPHA_SHARK
+ bool "Shark"
+
+config ALPHA_TAKARA
+ bool "Takara"
+ help
+ Alpha 21164-based OEM single-board computer.
+
+config ALPHA_TITAN
+ bool "Titan"
+ help
+ AlphaServer ES45/DS25 SMP based on EV68 and Titan chipset.
+
+config ALPHA_WILDFIRE
+ bool "Wildfire"
+ help
+ AlphaServer GS 40/80/160/320 SMP based on the EV67 core.
+
+endchoice
+
+# clear all implied options (don't want default values for those):
+# Most of these machines have ISA slots; not exactly sure which don't,
+# and this doesn't activate hordes of code, so do it always.
+config ISA
+ bool
+ default y
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+ inside your box. Other bus systems are PCI, EISA, MicroChannel
+ (MCA) or VESA. ISA is an older system, now being displaced by PCI;
+ newer boards don't support it. If you have ISA, say Y, otherwise N.
+
+config PCI
+ bool
+ depends on !ALPHA_JENSEN
+ default y
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+ VESA. If you have PCI, say Y, otherwise N.
+
+ The PCI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, contains valuable
+ information about which PCI hardware does work under Linux and which
+ doesn't.
+
+config PCI_DOMAINS
+ bool
+ default y
+
+config ALPHA_CORE_AGP
+ bool
+ depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL
+ default y
+
+config ALPHA_NONAME
+ bool
+ depends on ALPHA_BOOK1 || ALPHA_NONAME_CH
+ default y
+ help
+ The AXPpci33 (aka NoName) is based on the EB66 (this includes the Multia
+ UDB). This design was produced by Digital's Technical OEM (TOEM)
+ group. It uses the 21066 processor running at 166MHz or 233MHz. It
+ is a baby-AT size, and runs from a standard PC power supply. It has
+ 5 ISA slots and 3 PCI slots (one pair are a shared slot). There are
+ 2 versions, with either PS/2 or large DIN connectors for the
+ keyboard.
+
+config ALPHA_EV4
+ bool
+ depends on ALPHA_JENSEN || (ALPHA_SABLE && !ALPHA_GAMMA) || ALPHA_LYNX || ALPHA_NORITAKE && !ALPHA_PRIMO || ALPHA_MIKASA && !ALPHA_PRIMO || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL || ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
+ default y if !ALPHA_LYNX
+
+config ALPHA_LCA
+ bool
+ depends on ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
+ default y
+
+config ALPHA_APECS
+ bool
+ depends on !ALPHA_PRIMO && (ALPHA_NORITAKE || ALPHA_MIKASA) || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL
+ default y
+
+config ALPHA_EB64P
+ bool
+ depends on ALPHA_CABRIOLET || ALPHA_EB64P_CH
+ default y
+ help
+ Uses 21064 or 21064A and APECS. Has ISA and PCI expansion (3 ISA,
+ 2 PCI, one pair are on a shared slot). Supports 36-bit DRAM SIMMs.
+ ISA bus generated by Intel Saturn I/O PCI-ISA bridge. On-board SCSI
+ (NCR 810 on PCI) Ethernet (Digital 21040), KBD, MOUSE (PS2 style),
+ SuperI/O (2S, 1P, FD), RTC/NVRAM. Boot ROM is EPROM. PC-AT size.
+ Runs from standard PC power supply.
+
+config ALPHA_EV5
+ bool "EV5 CPU(s) (model 5/xxx)?" if ALPHA_LYNX
+ default y if ALPHA_RX164 || ALPHA_RAWHIDE || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_SABLE && ALPHA_GAMMA || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
+
+config ALPHA_EV4
+ bool
+ default y if ALPHA_LYNX && !ALPHA_EV5
+
+config ALPHA_CIA
+ bool
+ depends on ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
+ default y
+
+config ALPHA_EV56
+ bool "EV56 CPU (speed >= 366MHz)?" if ALPHA_ALCOR
+ default y if ALPHA_RX164 || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_PC164 || ALPHA_TAKARA
+
+config ALPHA_EV56
+ prompt "EV56 CPU (speed >= 333MHz)?"
+ depends on ALPHA_NORITAKE && ALPHA_PRIMO
+
+config ALPHA_EV56
+ prompt "EV56 CPU (speed >= 400MHz)?"
+ depends on ALPHA_RAWHIDE
+
+config ALPHA_PRIMO
+ bool "EV5 CPU daughtercard (model 5/xxx)?"
+ depends on ALPHA_NORITAKE || ALPHA_MIKASA
+ help
+ Say Y if you have an AS 1000 5/xxx or an AS 1000A 5/xxx.
+
+config ALPHA_GAMMA
+ bool "EV5 CPU(s) (model 5/xxx)?"
+ depends on ALPHA_SABLE
+ help
+ Say Y if you have an AS 2000 5/xxx or an AS 2100 5/xxx.
+
+config ALPHA_GAMMA
+ bool
+ depends on ALPHA_LYNX
+ default y
+
+config ALPHA_T2
+ bool
+ depends on ALPHA_SABLE || ALPHA_LYNX
+ default y
+
+config ALPHA_PYXIS
+ bool
+ depends on ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN
+ default y
+
+config ALPHA_EV6
+ bool
+ depends on ALPHA_NAUTILUS || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_DP264 || ALPHA_EIGER || ALPHA_MARVEL
+ default y
+
+config ALPHA_TSUNAMI
+ bool
+ depends on ALPHA_SHARK || ALPHA_DP264 || ALPHA_EIGER
+ default y
+
+config ALPHA_EV67
+ bool "EV67 (or later) CPU (speed > 600MHz)?" if ALPHA_DP264 || ALPHA_EIGER
+ default y if ALPHA_NAUTILUS || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
+ help
+ Is this a machine based on the EV67 core? If in doubt, select N here
+ and the machine will be treated as an EV6.
+
+config ALPHA_EV7
+ bool
+ depends on ALPHA_MARVEL
+ default y
+
+config ALPHA_MCPCIA
+ bool
+ depends on ALPHA_RAWHIDE
+ default y
+
+config ALPHA_POLARIS
+ bool
+ depends on ALPHA_RX164
+ default y
+
+config ALPHA_IRONGATE
+ bool
+ depends on ALPHA_NAUTILUS
+ default y
+
+config ALPHA_AVANTI
+ bool
+ depends on ALPHA_XL || ALPHA_AVANTI_CH
+ default y
+ help
+ Avanti AS 200, AS 205, AS 250, AS 255, AS 300, and AS 400-based
+ Alphas. Info at
+ <http://www.unix-ag.org/Linux-Alpha/Architectures/Avanti.html>.
+
+config ALPHA_BROKEN_IRQ_MASK
+ bool
+ depends on ALPHA_GENERIC || ALPHA_PC164
+ default y
+
+config ALPHA_SRM
+ bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
+ default y if ALPHA_JENSEN || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
+ ---help---
+ There are two different types of booting firmware on Alphas: SRM,
+ which is command line driven, and ARC, which uses menus and arrow
+ keys. Details about the Linux/Alpha booting process are contained in
+ the Linux/Alpha FAQ, accessible on the WWW from
+ <http://www.alphalinux.org/>.
+
+ The usual way to load Linux on an Alpha machine is to use MILO
+ (a bootloader that lets you pass command line parameters to the
+ kernel just like lilo does for the x86 architecture) which can be
+ loaded either from ARC or can be installed directly as a permanent
+ firmware replacement from floppy (which requires changing a certain
+ jumper on the motherboard). If you want to do either of these, say N
+ here. If MILO doesn't work on your system (true for Jensen
+ motherboards), you can bypass it altogether and boot Linux directly
+ from an SRM console; say Y here in order to do that. Note that you
+ won't be able to boot from an IDE disk using SRM.
+
+ If unsure, say N.
+
+config EISA
+ bool
+ depends on ALPHA_GENERIC || ALPHA_JENSEN || ALPHA_ALCOR || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_RAWHIDE
+ default y
+
+config SMP
+ bool "Symmetric multi-processing support"
+ depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
+
+ See also the <file:Documentation/smp.txt>, and the SMP-HOWTO
+ available at <http://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
+
+config HAVE_DEC_LOCK
+ bool
+ depends on SMP
+ default y
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-64)"
+ range 2 64
+ depends on SMP
+ default "64"
+
+config DISCONTIGMEM
+ bool "Discontiguous Memory Support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ Say Y to support efficient handling of discontiguous physical memory,
+ for architectures which are either NUMA (Non-Uniform Memory Access)
+ or have huge holes in the physical address space for other reasons.
+ See <file:Documentation/vm/numa> for more.
+
+config NUMA
+ bool "NUMA Support (EXPERIMENTAL)"
+ depends on DISCONTIGMEM
+ help
+ Say Y to compile the kernel to support NUMA (Non-Uniform Memory
+ Access). This option is for configuring high-end multiprocessor
+ server machines. If in doubt, say N.
+
+# LARGE_VMALLOC is racy, if you *really* need it then fix it first
+config ALPHA_LARGE_VMALLOC
+ bool
+ ---help---
+ Process creation and other aspects of virtual memory management can
+ be streamlined if we restrict the kernel to one PGD for all vmalloc
+ allocations. This equates to about 8GB.
+
+ Under normal circumstances, this is so far and above what is needed
+ as to be laughable. However, there are certain applications (such
+ as benchmark-grade in-kernel web serving) that can make use of as
+ much vmalloc space as is available.
+
+ Say N unless you know you need gobs and gobs of vmalloc space.
+
+config VERBOSE_MCHECK
+ bool "Verbose Machine Checks"
+
+config VERBOSE_MCHECK_ON
+ int "Verbose Printing Mode (0=off, 1=on, 2=all)"
+ depends on VERBOSE_MCHECK
+ default 1
+ ---help---
+ This option allows the default printing mode to be set, and then
+ possibly overridden by a boot command argument.
+
+ For example, if one wanted the option of printing verbose
+ machine checks, but wanted the default to be as if verbose
+ machine check printing was turned off, then one would choose
+ the printing mode to be 0. Then, upon reboot, one could add
+ the boot command line "verbose_mcheck=1" to get the normal
+ verbose machine check printing, or "verbose_mcheck=2" to get
+ the maximum information available.
+
+ Take the default (1) unless you want more control or more info.
+
+source "drivers/pci/Kconfig"
+source "drivers/eisa/Kconfig"
+
+source "drivers/pcmcia/Kconfig"
+
+config SRM_ENV
+ tristate "SRM environment through procfs"
+ depends on PROC_FS
+ ---help---
+ If you enable this option, a subdirectory inside /proc called
+ /proc/srm_environment will give you access to the all important
+ SRM environment variables (those which have a name) and also
+ to all others (by their internal number).
+
+ SRM is something like a BIOS for Alpha machines. There are some
+ other such BIOSes, like AlphaBIOS, which this driver cannot
+ support (hey, that's not SRM!).
+
+ Despite the fact that this driver doesn't work on all Alphas (but
+ only on those which have SRM as their firmware), it's safe to
+ build it even if your particular machine doesn't know about SRM
+ (or if you intend to compile a generic kernel). It will simply
+ not create that subdirectory in /proc (and will give you a
+ warning, of course).
+
+ This driver is also available as a module, which will be called
+ srm_env.
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/alpha/oprofile/Kconfig"
+
+source "arch/alpha/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
diff --git a/arch/alpha/Kconfig.debug b/arch/alpha/Kconfig.debug
new file mode 100644
index 000000000000..36d0106c32eb
--- /dev/null
+++ b/arch/alpha/Kconfig.debug
@@ -0,0 +1,59 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+ bool
+ depends on ALPHA_GENERIC || ALPHA_SRM
+ default y
+
+config DEBUG_RWLOCK
+ bool "Read-write spinlock debugging"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here then read-write lock processing will count how many
+ times it has tried to get the lock and issue an error message after
+ too many attempts. If you suspect a rwlock problem or a kernel
+ hacker asks for this option then say Y. Otherwise say N.
+
+config DEBUG_SEMAPHORE
+ bool "Semaphore debugging"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here then semaphore processing will issue lots of
+ verbose debugging messages. If you suspect a semaphore problem or a
+ kernel hacker asks for this option then say Y. Otherwise say N.
+
+config ALPHA_LEGACY_START_ADDRESS
+ bool "Legacy kernel start address"
+ depends on ALPHA_GENERIC
+ default n
+ ---help---
+ The 2.4 kernel changed the kernel start address from 0x310000
+ to 0x810000 to make room for the Wildfire's larger SRM console.
+ Recent consoles on Titan and Marvel machines also require the
+ extra room.
+
+ If you're using aboot 0.7 or later, the bootloader will examine the
+ ELF headers to determine where to transfer control. Unfortunately,
+ most older bootloaders -- APB or MILO -- hardcoded the kernel start
+ address rather than examining the ELF headers, and the result is a
+ hard lockup.
+
+ Say Y if you have a broken bootloader. Say N if you do not, or if
+ you wish to run on Wildfire, Titan, or Marvel.
+
+config ALPHA_LEGACY_START_ADDRESS
+ bool
+ depends on !ALPHA_GENERIC && !ALPHA_TITAN && !ALPHA_MARVEL && !ALPHA_WILDFIRE
+ default y
+
+config MATHEMU
+ tristate "Kernel FP software completion" if DEBUG_KERNEL && !SMP
+ default y if !DEBUG_KERNEL || SMP
+ help
+ This option is required for IEEE compliant floating point arithmetic
+ on the Alpha. The only time you would ever not say Y is to say M in
+ order to debug the code. Say Y unless you know what you are doing.
+
+endmenu
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
new file mode 100644
index 000000000000..22ebfb2be0e4
--- /dev/null
+++ b/arch/alpha/Makefile
@@ -0,0 +1,130 @@
+#
+# alpha/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+
+NM := $(NM) -B
+
+LDFLAGS_vmlinux := -static -N #-relax
+CHECKFLAGS += -D__alpha__ -m64
+cflags-y := -pipe -mno-fp-regs -ffixed-8
+
+# Determine if we can use the BWX instructions with GAS.
+old_gas := $(shell if $(AS) --version 2>&1 | grep 'version 2.7' > /dev/null; then echo y; else echo n; fi)
+
+ifeq ($(old_gas),y)
+$(error The assembler '$(AS)' does not support the BWX instruction)
+endif
+
+# Determine if GCC understands the -mcpu= option.
+have_mcpu := $(call cc-option-yn, -mcpu=ev5)
+have_mcpu_pca56 := $(call cc-option-yn, -mcpu=pca56)
+have_mcpu_ev6 := $(call cc-option-yn, -mcpu=ev6)
+have_mcpu_ev67 := $(call cc-option-yn, -mcpu=ev67)
+have_msmall_data := $(call cc-option-yn, -msmall-data)
+
+cflags-$(have_msmall_data) += -msmall-data
+
+# Turn on the proper cpu optimizations.
+ifeq ($(have_mcpu),y)
+ mcpu_done := n
+ # If GENERIC, make sure to turn off any instruction set extensions that
+ # the host compiler might have on by default. Given that EV4 and EV5
+ # have the same instruction set, prefer EV5 because an EV5 schedule is
+ # more likely to keep an EV4 processor busy than vice-versa.
+ ifeq ($(CONFIG_ALPHA_GENERIC),y)
+ mcpu := ev5
+ mcpu_done := y
+ endif
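+ # Each test below concatenates its guard variables, e.g.
+ # $(mcpu_done)$(CONFIG_ALPHA_SX164)$(have_mcpu_pca56), and compares
+ # the result against a literal such as "nyy": the branch is taken
+ # only when no cpu has been chosen yet (n) and the remaining flags
+ # are all y.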
+ ifeq ($(mcpu_done)$(CONFIG_ALPHA_SX164)$(have_mcpu_pca56),nyy)
+ mcpu := pca56
+ mcpu_done := y
+ endif
+ ifeq ($(mcpu_done)$(CONFIG_ALPHA_POLARIS)$(have_mcpu_pca56),nyy)
+ mcpu := pca56
+ mcpu_done := y
+ endif
+ ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV4),ny)
+ mcpu := ev4
+ mcpu_done := y
+ endif
+ ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV56),ny)
+ mcpu := ev56
+ mcpu_done := y
+ endif
+ ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV5),ny)
+ mcpu := ev5
+ mcpu_done := y
+ endif
+ ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV67)$(have_mcpu_ev67),nyy)
+ mcpu := ev67
+ mcpu_done := y
+ endif
+ ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV6),ny)
+ ifeq ($(have_mcpu_ev6),y)
+ mcpu := ev6
+ else
+ ifeq ($(have_mcpu_pca56),y)
+ mcpu := pca56
+ else
+ mcpu := ev56
+ endif
+ endif
+ mcpu_done := y
+ endif
+ cflags-$(mcpu_done) += -mcpu=$(mcpu)
+endif
+
+
+# For TSUNAMI, we must have the assembler not emulate our instructions.
+# The same is true for IRONGATE, POLARIS, PYXIS.
+# BWX is most important, but we don't really want any emulation ever.
+CFLAGS += $(cflags-y) -Wa,-mev6
+
+head-y := arch/alpha/kernel/head.o
+
+core-y += arch/alpha/kernel/ arch/alpha/mm/
+core-$(CONFIG_MATHEMU) += arch/alpha/math-emu/
+drivers-$(CONFIG_OPROFILE) += arch/alpha/oprofile/
+libs-y += arch/alpha/lib/
+
+# export what is needed by arch/alpha/boot/Makefile
+LIBS_Y := $(patsubst %/, %/lib.a, $(libs-y))
+export LIBS_Y
+
+boot := arch/alpha/boot
+
+# Default target when executing make with no arguments
+all boot: $(boot)/vmlinux.gz
+
+$(boot)/vmlinux.gz: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+bootimage bootpfile bootpzfile: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+
+prepare: include/asm-$(ARCH)/asm_offsets.h
+
+arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+ include/config/MARKER
+
+include/asm-$(ARCH)/asm_offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
+ $(call filechk,gen-asm-offsets)
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h
+
+define archhelp
+ echo '* boot - Compressed kernel image (arch/alpha/boot/vmlinux.gz)'
+ echo ' bootimage - SRM bootable image (arch/alpha/boot/bootimage)'
+ echo ' bootpfile - BOOTP bootable image (arch/alpha/boot/bootpfile)'
+ echo ' bootpzfile - compressed kernel BOOTP image (arch/alpha/boot/bootpzfile)'
+endef
diff --git a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile
new file mode 100644
index 000000000000..e1ae14cd2b4e
--- /dev/null
+++ b/arch/alpha/boot/Makefile
@@ -0,0 +1,116 @@
+#
+# arch/alpha/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+
+hostprogs-y := tools/mkbb tools/objstrip
+targets := vmlinux.gz vmlinux \
+ vmlinux.nh tools/lxboot tools/bootlx tools/bootph \
+ tools/bootpzh bootloader bootpheader bootpzheader
+OBJSTRIP := $(obj)/tools/objstrip
+
+# SRM bootable image. Copy to offset 512 of a partition.
+$(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh
+ ( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@
+ $(obj)/tools/mkbb $@ $(obj)/tools/lxboot
+ @echo ' Bootimage $@ is ready'
+
+# BOOTP bootable image. Define INITRD during make to append initrd image.
+$(obj)/bootpfile: $(obj)/tools/bootph $(obj)/vmlinux.nh
+ cat $(obj)/tools/bootph $(obj)/vmlinux.nh > $@
+ifdef INITRD
+ cat $(INITRD) >> $@
+endif
+
+# Compressed kernel BOOTP bootable image.
+# Define INITRD during make to append initrd image.
+$(obj)/bootpzfile: $(obj)/tools/bootpzh $(obj)/vmlinux.nh.gz
+ cat $(obj)/tools/bootpzh $(obj)/vmlinux.nh.gz > $@
+ifdef INITRD
+ cat $(INITRD) >> $@
+endif
+
+# Compressed kernel image
+$(obj)/vmlinux.gz: $(obj)/vmlinux FORCE
+ $(call if_changed,gzip)
+ @echo ' Kernel $@ is ready'
+
+$(obj)/main.o: $(obj)/ksize.h
+$(obj)/bootp.o: $(obj)/ksize.h
+$(obj)/bootpz.o: $(obj)/kzsize.h
+
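+# Note: the size defines below are generated by taking field 5 (the byte
+# count) of "ls -l" on the image; the $@T temporary plus "cmp -s" leaves
+# the header file untouched -- avoiding needless rebuilds -- when the
+# sizes have not changed.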
+$(obj)/ksize.h: $(obj)/vmlinux.nh FORCE
+ echo "#define KERNEL_SIZE `ls -l $(obj)/vmlinux.nh | awk '{print $$5}'`" > $@T
+ifdef INITRD
+ [ -f $(INITRD) ] || exit 1
+ echo "#define INITRD_IMAGE_SIZE `ls -l $(INITRD) | awk '{print $$5}'`" >> $@T
+endif
+ cmp -s $@T $@ || mv -f $@T $@
+ rm -f $@T
+
+$(obj)/kzsize.h: $(obj)/vmlinux.nh.gz FORCE
+ echo "#define KERNEL_SIZE `ls -l $(obj)/vmlinux.nh | awk '{print $$5}'`" > $@T
+ echo "#define KERNEL_Z_SIZE `ls -l $(obj)/vmlinux.nh.gz | awk '{print $$5}'`" >> $@T
+ifdef INITRD
+ [ -f $(INITRD) ] || exit 1
+ echo "#define INITRD_IMAGE_SIZE `ls -l $(INITRD) | awk '{print $$5}'`" >> $@T
+endif
+ cmp -s $@T $@ || mv -f $@T $@
+ rm -f $@T
+
+quiet_cmd_strip = STRIP $@
+ cmd_strip = $(STRIP) -o $@ $<
+
+$(obj)/vmlinux: vmlinux FORCE
+ $(call if_changed,strip)
+
+quiet_cmd_objstrip = OBJSTRIP $@
+ cmd_objstrip = $(OBJSTRIP) $(OSFLAGS_$(@F)) $< $@
+
+OSFLAGS_vmlinux.nh := -v
+OSFLAGS_lxboot := -p
+OSFLAGS_bootlx := -vb
+OSFLAGS_bootph := -vb
+OSFLAGS_bootpzh := -vb
+
+$(obj)/vmlinux.nh: vmlinux $(OBJSTRIP) FORCE
+ $(call if_changed,objstrip)
+
+$(obj)/vmlinux.nh.gz: $(obj)/vmlinux.nh FORCE
+ $(call if_changed,gzip)
+
+$(obj)/tools/lxboot: $(obj)/bootloader $(OBJSTRIP) FORCE
+ $(call if_changed,objstrip)
+
+$(obj)/tools/bootlx: $(obj)/bootloader $(OBJSTRIP) FORCE
+ $(call if_changed,objstrip)
+
+$(obj)/tools/bootph: $(obj)/bootpheader $(OBJSTRIP) FORCE
+ $(call if_changed,objstrip)
+
+$(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE
+ $(call if_changed,objstrip)
+
+LDFLAGS_bootloader := -static -uvsprintf -T #-N -relax
+LDFLAGS_bootpheader := -static -uvsprintf -T #-N -relax
+LDFLAGS_bootpzheader := -static -uvsprintf -T #-N -relax
+
+OBJ_bootlx := $(obj)/head.o $(obj)/main.o
+OBJ_bootph := $(obj)/head.o $(obj)/bootp.o
+OBJ_bootpzh := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o
+
+$(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) FORCE
+ $(call if_changed,ld)
+
+$(obj)/bootpheader: $(obj)/bootloader.lds $(OBJ_bootph) $(LIBS_Y) FORCE
+ $(call if_changed,ld)
+
+$(obj)/bootpzheader: $(obj)/bootloader.lds $(OBJ_bootpzh) $(LIBS_Y) FORCE
+ $(call if_changed,ld)
+
+$(obj)/misc.o: lib/inflate.c
diff --git a/arch/alpha/boot/bootloader.lds b/arch/alpha/boot/bootloader.lds
new file mode 100644
index 000000000000..31c081ce1d50
--- /dev/null
+++ b/arch/alpha/boot/bootloader.lds
@@ -0,0 +1,24 @@
+OUTPUT_FORMAT("elf64-alpha")
+ENTRY(__start)
+printk = srm_printk;
+SECTIONS
+{
+ . = 0x20000000;
+ .text : { *(.text) }
+ _etext = .;
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata) *(.rodata.*) }
+ .data : { *(.data) CONSTRUCTORS }
+ .got : { *(.got) }
+ .sdata : { *(.sdata) }
+ _edata = .;
+ PROVIDE (edata = .);
+ .sbss : { *(.sbss) *(.scommon) }
+ .bss : { *(.bss) *(COMMON) }
+ _end = . ;
+ PROVIDE (end = .);
+
+ .mdebug 0 : { *(.mdebug) }
+ .note 0 : { *(.note) }
+ .comment 0 : { *(.comment) }
+}
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c
new file mode 100644
index 000000000000..ec53c28e33de
--- /dev/null
+++ b/arch/alpha/boot/bootp.c
@@ -0,0 +1,214 @@
+/*
+ * arch/alpha/boot/bootp.c
+ *
+ * Copyright (C) 1997 Jay Estabrook
+ *
+ * This file is used for creating a bootp file for the Linux/AXP kernel
+ *
+ * based significantly on the arch/alpha/boot/main.c of Linus Torvalds
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/console.h>
+#include <asm/hwrpb.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+#include <stdarg.h>
+
+#include "ksize.h"
+
+extern unsigned long switch_to_osf_pal(unsigned long nr,
+ struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+ unsigned long *vptb);
+
+extern void move_stack(unsigned long new_stack);
+
+struct hwrpb_struct *hwrpb = INIT_HWRPB;
+static struct pcb_struct pcb_va[1];
+
+/*
+ * Find a physical address of a virtual object..
+ *
+ * This is easy using the virtual page table address.
+ */
+
+static inline void *
+find_pa(unsigned long *vptb, void *ptr)
+{
+ unsigned long address = (unsigned long) ptr;
+ unsigned long result;
+
+ result = vptb[address >> 13];
+ result >>= 32;
+ result <<= 13;
+ result |= address & 0x1fff;
+ return (void *) result;
+}
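+
+/*
+ * A worked sketch of the lookup above (address values hypothetical),
+ * assuming the 8kB pages that start_kernel() checks for (13 offset bits):
+ *
+ * ptr = 0x20000a123
+ * vptb[ptr >> 13] -> PTE for the page; the PFN lives in bits 63:32
+ * (pte >> 32) << 13 -> physical base of the page
+ * | (ptr & 0x1fff) -> or in the in-page offset, 0x123
+ */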
+
+/*
+ * This function moves into OSF/1 pal-code, and has a temporary
+ * PCB for that. The kernel proper should replace this PCB with
+ * the real one as soon as possible.
+ *
+ * The page table muckery in here depends on the fact that the boot
+ * code has the L1 page table identity-map itself in the second PTE
+ * in the L1 page table. Thus the L1-page is virtually addressable
+ * itself (through three levels) at virtual address 0x200802000.
+ */
+
+#define VPTB ((unsigned long *) 0x200000000)
+#define L1 ((unsigned long *) 0x200802000)
+
+void
+pal_init(void)
+{
+ unsigned long i, rev;
+ struct percpu_struct * percpu;
+ struct pcb_struct * pcb_pa;
+
+ /* Create the dummy PCB. */
+ pcb_va->ksp = 0;
+ pcb_va->usp = 0;
+ pcb_va->ptbr = L1[1] >> 32;
+ pcb_va->asn = 0;
+ pcb_va->pcc = 0;
+ pcb_va->unique = 0;
+ pcb_va->flags = 1;
+ pcb_va->res1 = 0;
+ pcb_va->res2 = 0;
+ pcb_pa = find_pa(VPTB, pcb_va);
+
+ /*
+ * a0 = 2 (OSF)
+ * a1 = return address, but we give the asm the vaddr of the PCB
+ * a2 = physical addr of PCB
+ * a3 = new virtual page table pointer
+ * a4 = KSP (but the asm sets it)
+ */
+ srm_printk("Switching to OSF PAL-code .. ");
+
+ i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB);
+ if (i) {
+ srm_printk("failed, code %ld\n", i);
+ __halt();
+ }
+
+ percpu = (struct percpu_struct *)
+ (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB);
+ rev = percpu->pal_revision = percpu->palcode_avail[2];
+
+ srm_printk("Ok (rev %lx)\n", rev);
+
+ tbia(); /* do it directly in case we are SMP */
+}
+
+static inline void
+load(unsigned long dst, unsigned long src, unsigned long count)
+{
+ memcpy((void *)dst, (void *)src, count);
+}
+
+/*
+ * Start the kernel.
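+ *
+ * (The jump goes through $27 because the Alpha calling convention
+ * treats $27 as the procedure value: the kernel entry code can then
+ * derive its GP from it with ldgp.)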
+ */
+static inline void
+runkernel(void)
+{
+ __asm__ __volatile__(
+ "bis %0,%0,$27\n\t"
+ "jmp ($27)"
+ : /* no outputs: it doesn't even return */
+ : "r" (START_ADDR));
+}
+
+extern char _end;
+#define KERNEL_ORIGIN \
+ ((((unsigned long)&_end) + 511) & ~511)
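+
+/*
+ * That is, round the bootstrapper's _end up to the next 512-byte
+ * boundary; e.g. (hypothetical value) an _end of 0x2000f234 yields a
+ * KERNEL_ORIGIN of 0x2000f400.
+ */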
+
+void
+start_kernel(void)
+{
+ /*
+ * Note that this crufty stuff with static and envval
+ * and envbuf is because:
+ *
+ * 1. Frequently, the stack is short, and we don't want to overrun;
+ * 2. Frequently the stack is where we are going to copy the kernel to;
+ * 3. A certain SRM console required the GET_ENV output to be on
+ * the stack.
+ * ??? A comment in the aboot sources indicates that the GET_ENV
+ * destination must be quadword aligned. Might this explain the
+ * behaviour, rather than requiring output to the stack, which
+ * seems rather far-fetched?
+ */
+ static long nbytes;
+ static char envval[256] __attribute__((aligned(8)));
+ static unsigned long initrd_start;
+
+ srm_printk("Linux/AXP bootp loader for Linux " UTS_RELEASE "\n");
+ if (INIT_HWRPB->pagesize != 8192) {
+ srm_printk("Expected 8kB pages, got %ldkB\n",
+ INIT_HWRPB->pagesize >> 10);
+ return;
+ }
+ if (INIT_HWRPB->vptb != (unsigned long) VPTB) {
+ srm_printk("Expected vptb at %p, got %p\n",
+ VPTB, (void *)INIT_HWRPB->vptb);
+ return;
+ }
+ pal_init();
+
+ /* The initrd must be page-aligned. See below for the
+ cause of the magic number 5. */
+ initrd_start = ((START_ADDR + 5*KERNEL_SIZE + PAGE_SIZE) |
+ (PAGE_SIZE-1)) + 1;
+#ifdef INITRD_IMAGE_SIZE
+ srm_printk("Initrd positioned at %#lx\n", initrd_start);
+#endif
+
+ /*
+ * Move the stack to a safe place to ensure it won't be
+ * overwritten by kernel image.
+ */
+ move_stack(initrd_start - PAGE_SIZE);
+
+ nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval));
+ if (nbytes < 0 || nbytes >= sizeof(envval)) {
+ nbytes = 0;
+ }
+ envval[nbytes] = '\0';
+ srm_printk("Loading the kernel...'%s'\n", envval);
+
+ /* NOTE: *no* callbacks or printouts from here on out!!! */
+
+ /* This is a hack, as some consoles seem to get virtual 0x20000000 (ie
+ * where the SRM console puts the kernel bootp image) memory
+ * overlapping physical memory where the kernel wants to be put,
+ * which causes real problems when attempting to copy the former to
+ * the latter... :-(
+ *
+ * So, we first move the kernel virtual-to-physical way above where
+ * we physically want the kernel to end up, then copy it from there
+ * to its final resting place... ;-}
+ *
+ * Sigh... */
+
+#ifdef INITRD_IMAGE_SIZE
+ load(initrd_start, KERNEL_ORIGIN+KERNEL_SIZE, INITRD_IMAGE_SIZE);
+#endif
+ load(START_ADDR+(4*KERNEL_SIZE), KERNEL_ORIGIN, KERNEL_SIZE);
+ load(START_ADDR, START_ADDR+(4*KERNEL_SIZE), KERNEL_SIZE);
+
+ memset((char*)ZERO_PGE, 0, PAGE_SIZE);
+ strcpy((char*)ZERO_PGE, envval);
+#ifdef INITRD_IMAGE_SIZE
+ ((long *)(ZERO_PGE+256))[0] = initrd_start;
+ ((long *)(ZERO_PGE+256))[1] = INITRD_IMAGE_SIZE;
+#endif
+
+ runkernel();
+}
diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c
new file mode 100644
index 000000000000..a6657f2cf9bd
--- /dev/null
+++ b/arch/alpha/boot/bootpz.c
@@ -0,0 +1,469 @@
+/*
+ * arch/alpha/boot/bootpz.c
+ *
+ * Copyright (C) 1997 Jay Estabrook
+ *
+ * This file is used for creating a compressed BOOTP file for the
+ * Linux/AXP kernel
+ *
+ * based significantly on the arch/alpha/boot/main.c of Linus Torvalds
+ * and the decompression code from MILO.
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/console.h>
+#include <asm/hwrpb.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+#include <stdarg.h>
+
+#include "kzsize.h"
+
+/* FIXME FIXME FIXME */
+#define MALLOC_AREA_SIZE 0x200000 /* 2MB for now */
+/* FIXME FIXME FIXME */
+
+
+/*
+ WARNING NOTE
+
+ It is very possible that turning on additional messages may cause
+ kernel image corruption due to stack usage to do the printing.
+
+*/
+
+#undef DEBUG_CHECK_RANGE
+#undef DEBUG_ADDRESSES
+#undef DEBUG_LAST_STEPS
+
+extern unsigned long switch_to_osf_pal(unsigned long nr,
+ struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+ unsigned long *vptb);
+
+extern int decompress_kernel(void* destination, void *source,
+ size_t ksize, size_t kzsize);
+
+extern void move_stack(unsigned long new_stack);
+
+struct hwrpb_struct *hwrpb = INIT_HWRPB;
+static struct pcb_struct pcb_va[1];
+
+/*
+ * Find a physical address of a virtual object..
+ *
+ * This is easy using the virtual page table address.
+ */
+#define VPTB ((unsigned long *) 0x200000000)
+
+static inline unsigned long
+find_pa(unsigned long address)
+{
+ unsigned long result;
+
+ result = VPTB[address >> 13];
+ result >>= 32;
+ result <<= 13;
+ result |= address & 0x1fff;
+ return result;
+}
+
+int
+check_range(unsigned long vstart, unsigned long vend,
+ unsigned long kstart, unsigned long kend)
+{
+ unsigned long vaddr, kaddr;
+
+#ifdef DEBUG_CHECK_RANGE
+ srm_printk("check_range: V[0x%lx:0x%lx] K[0x%lx:0x%lx]\n",
+ vstart, vend, kstart, kend);
+#endif
+ /* do some range checking for detecting an overlap... */
+ for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE)
+ {
+ kaddr = (find_pa(vaddr) | PAGE_OFFSET);
+ if (kaddr >= kstart && kaddr <= kend)
+ {
+#ifdef DEBUG_CHECK_RANGE
+ srm_printk("OVERLAP: vaddr 0x%lx kaddr 0x%lx"
+ " [0x%lx:0x%lx]\n",
+ vaddr, kaddr, kstart, kend);
+#endif
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * This function moves into OSF/1 pal-code, and has a temporary
+ * PCB for that. The kernel proper should replace this PCB with
+ * the real one as soon as possible.
+ *
+ * The page table muckery in here depends on the fact that the boot
+ * code has the L1 page table identity-map itself in the second PTE
+ * in the L1 page table. Thus the L1-page is virtually addressable
+ * itself (through three levels) at virtual address 0x200802000.
+ */
+
+#define L1 ((unsigned long *) 0x200802000)
+
+void
+pal_init(void)
+{
+ unsigned long i, rev;
+ struct percpu_struct * percpu;
+ struct pcb_struct * pcb_pa;
+
+ /* Create the dummy PCB. */
+ pcb_va->ksp = 0;
+ pcb_va->usp = 0;
+ pcb_va->ptbr = L1[1] >> 32;
+ pcb_va->asn = 0;
+ pcb_va->pcc = 0;
+ pcb_va->unique = 0;
+ pcb_va->flags = 1;
+ pcb_va->res1 = 0;
+ pcb_va->res2 = 0;
+ pcb_pa = (struct pcb_struct *)find_pa((unsigned long)pcb_va);
+
+ /*
+ * a0 = 2 (OSF)
+ * a1 = return address, but we give the asm the vaddr of the PCB
+ * a2 = physical addr of PCB
+ * a3 = new virtual page table pointer
+ * a4 = KSP (but the asm sets it)
+ */
+ srm_printk("Switching to OSF PAL-code... ");
+
+ i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB);
+ if (i) {
+ srm_printk("failed, code %ld\n", i);
+ __halt();
+ }
+
+ percpu = (struct percpu_struct *)
+ (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB);
+ rev = percpu->pal_revision = percpu->palcode_avail[2];
+
+ srm_printk("OK (rev %lx)\n", rev);
+
+ tbia(); /* do it directly in case we are SMP */
+}
+
+/*
+ * Start the kernel.
+ */
+static inline void
+runkernel(void)
+{
+ __asm__ __volatile__(
+ "bis %0,%0,$27\n\t"
+ "jmp ($27)"
+ : /* no outputs: it doesn't even return */
+ : "r" (START_ADDR));
+}
+
+/* Must record the SP (it is virtual) on entry, so we can make sure
+ not to overwrite it during movement or decompression. */
+unsigned long SP_on_entry;
+
+/* Calculate the kernel image address based on the end of the BOOTP
+ bootstrapper (ie this program).
+*/
+extern char _end;
+#define KERNEL_ORIGIN \
+ ((((unsigned long)&_end) + 511) & ~511)
+
+/* Round address to next higher page boundary. */
+#define NEXT_PAGE(a) (((a) | (PAGE_SIZE - 1)) + 1)
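+/* E.g. with 8kB pages, NEXT_PAGE(0x10001) == 0x12000 -- and so is
+ NEXT_PAGE(0x10000): an already aligned address still advances by a
+ full page, unlike a plain round-up. */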
+
+#ifdef INITRD_IMAGE_SIZE
+# define REAL_INITRD_SIZE INITRD_IMAGE_SIZE
+#else
+# define REAL_INITRD_SIZE 0
+#endif
+
+/* Defines from include/asm-alpha/system.h
+
+ BOOT_ADDR Virtual address at which the console loads
+ the BOOTP image.
+
+ KERNEL_START KSEG address at which the kernel is built to run,
+ which includes some initial data pages before the
+ code.
+
+ START_ADDR KSEG address of the entry point of kernel code.
+
+ ZERO_PGE KSEG address of a page full of zeroes, which
+ upon entry to the kernel can be expected
+ to hold the parameter list and possibly
+ INITRD information.
+
+ These are used in the local defines below.
+*/
+
+
+/* Virtual addresses for the BOOTP image. Note that this includes the
+ bootstrapper code as well as the compressed kernel image, and
+ possibly the INITRD image.
+
+ Oh, and do NOT forget the STACK, which appears to be placed virtually
+ beyond the end of the loaded image.
+*/
+#define V_BOOT_IMAGE_START BOOT_ADDR
+#define V_BOOT_IMAGE_END SP_on_entry
+
+/* Virtual addresses for just the bootstrapper part of the BOOTP image. */
+#define V_BOOTSTRAPPER_START BOOT_ADDR
+#define V_BOOTSTRAPPER_END KERNEL_ORIGIN
+
+/* Virtual addresses for just the data part of the BOOTP
+ image. This may also include the INITRD image, but always
+ includes the STACK.
+*/
+#define V_DATA_START KERNEL_ORIGIN
+#define V_INITRD_START (KERNEL_ORIGIN + KERNEL_Z_SIZE)
+#define V_INITRD_END (V_INITRD_START + REAL_INITRD_SIZE)
+#define V_DATA_END V_BOOT_IMAGE_END
+
+/* KSEG addresses for the uncompressed kernel.
+
+ Note that the end address includes workspace for the decompression.
+ Note also that the DATA_START address is ZERO_PGE, to which we write
+ just before jumping to the kernel image at START_ADDR.
+ */
+#define K_KERNEL_DATA_START ZERO_PGE
+#define K_KERNEL_IMAGE_START START_ADDR
+#define K_KERNEL_IMAGE_END (START_ADDR + KERNEL_SIZE)
+
+/* Define to where we may have to decompress the kernel image, before
+ we move it to the final position, in case of overlap. This will be
+ above the final position of the kernel.
+
+ Regardless of overlap, we move the INITRD image to the end of this
+ copy area, because there needs to be a buffer area after the kernel
+ for "bootmem" anyway.
+*/
+#define K_COPY_IMAGE_START NEXT_PAGE(K_KERNEL_IMAGE_END)
+/* Reserve one page below INITRD for the new stack. */
+#define K_INITRD_START \
+ NEXT_PAGE(K_COPY_IMAGE_START + KERNEL_SIZE + PAGE_SIZE)
+#define K_COPY_IMAGE_END \
+ (K_INITRD_START + REAL_INITRD_SIZE + MALLOC_AREA_SIZE)
+#define K_COPY_IMAGE_SIZE \
+ NEXT_PAGE(K_COPY_IMAGE_END - K_COPY_IMAGE_START)
+
+void
+start_kernel(void)
+{
+ int must_move = 0;
+
+ /* Initialize these for the decompression-in-place situation,
+ which is the smallest amount of work and most likely to
+ occur when using the normal START_ADDR of the kernel
+ (currently set to 16MB, to clear all console code).
+ */
+ unsigned long uncompressed_image_start = K_KERNEL_IMAGE_START;
+ unsigned long uncompressed_image_end = K_KERNEL_IMAGE_END;
+
+ unsigned long initrd_image_start = K_INITRD_START;
+
+ /*
+ * Note that this crufty stuff with static and envval
+ * and envbuf is because:
+ *
+ * 1. Frequently, the stack is short, and we don't want to overrun;
+ * 2. Frequently the stack is where we are going to copy the kernel to;
+ * 3. A certain SRM console required the GET_ENV output to be on
+ * the stack.
+ * ??? A comment in the aboot sources indicates that the GET_ENV
+ * destination must be quadword aligned. Might this explain the
+ * behaviour, rather than requiring output to the stack, which
+ * seems rather far-fetched?
+ */
+ static long nbytes;
+ static char envval[256] __attribute__((aligned(8)));
+ register unsigned long asm_sp asm("30");
+
+ SP_on_entry = asm_sp;
+
+ srm_printk("Linux/Alpha BOOTPZ Loader for Linux " UTS_RELEASE "\n");
+
+ /* Validity check the HWRPB. */
+ if (INIT_HWRPB->pagesize != 8192) {
+ srm_printk("Expected 8kB pages, got %ldkB\n",
+ INIT_HWRPB->pagesize >> 10);
+ return;
+ }
+ if (INIT_HWRPB->vptb != (unsigned long) VPTB) {
+ srm_printk("Expected vptb at %p, got %p\n",
+ VPTB, (void *)INIT_HWRPB->vptb);
+ return;
+ }
+
+ /* PALcode (re)initialization. */
+ pal_init();
+
+ /* Get the parameter list from the console environment variable. */
+ nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval));
+ if (nbytes < 0 || nbytes >= sizeof(envval)) {
+ nbytes = 0;
+ }
+ envval[nbytes] = '\0';
+
+#ifdef DEBUG_ADDRESSES
+ srm_printk("START_ADDR 0x%lx\n", START_ADDR);
+ srm_printk("KERNEL_ORIGIN 0x%lx\n", KERNEL_ORIGIN);
+ srm_printk("KERNEL_SIZE 0x%x\n", KERNEL_SIZE);
+ srm_printk("KERNEL_Z_SIZE 0x%x\n", KERNEL_Z_SIZE);
+#endif
+
+ /* Since all the SRM consoles load the BOOTP image at virtual
+ * 0x20000000, we have to ensure that the physical memory
+ * pages occupied by that image do NOT overlap the physical
+ * address range where the kernel wants to be run. This
+ * causes real problems when attempting to decompress the
+ * former into the latter... :-(
+ *
+ * So, we may have to decompress/move the kernel/INITRD image
+ * virtual-to-physical someplace else first before moving
+ * kernel/INITRD to their final resting places... ;-}
+ *
+ * Sigh...
+ */
+
+ /* First, check to see if the range of addresses occupied by
+ the bootstrapper part of the BOOTP image include any of the
+ physical pages into which the kernel will be placed for
+ execution.
+
+ We only need check on the final kernel image range, since we
+ will put the INITRD someplace that we can be sure is not
+ in conflict.
+ */
+ if (check_range(V_BOOTSTRAPPER_START, V_BOOTSTRAPPER_END,
+ K_KERNEL_DATA_START, K_KERNEL_IMAGE_END))
+ {
+ srm_printk("FATAL ERROR: overlap of bootstrapper code\n");
+ __halt();
+ }
+
+ /* Next, check to see if the range of addresses occupied by
+ the compressed kernel/INITRD/stack portion of the BOOTP
+ image include any of the physical pages into which the
+ decompressed kernel or the INITRD will be placed for
+ execution.
+ */
+ if (check_range(V_DATA_START, V_DATA_END,
+ K_KERNEL_IMAGE_START, K_COPY_IMAGE_END))
+ {
+#ifdef DEBUG_ADDRESSES
+ srm_printk("OVERLAP: cannot decompress in place\n");
+#endif
+ uncompressed_image_start = K_COPY_IMAGE_START;
+ uncompressed_image_end = K_COPY_IMAGE_END;
+ must_move = 1;
+
+ /* Finally, check to see if the range of addresses
+ occupied by the compressed kernel/INITRD part of
+ the BOOTP image include any of the physical pages
+ into which that part is to be copied for
+ decompression.
+ */
+ while (check_range(V_DATA_START, V_DATA_END,
+ uncompressed_image_start,
+ uncompressed_image_end))
+ {
+#if 0
+ uncompressed_image_start += K_COPY_IMAGE_SIZE;
+ uncompressed_image_end += K_COPY_IMAGE_SIZE;
+ initrd_image_start += K_COPY_IMAGE_SIZE;
+#else
+ /* Keep as close as possible to end of BOOTP image. */
+ uncompressed_image_start += PAGE_SIZE;
+ uncompressed_image_end += PAGE_SIZE;
+ initrd_image_start += PAGE_SIZE;
+#endif
+ }
+ }
+
+ srm_printk("Starting to load the kernel with args '%s'\n", envval);
+
+#ifdef DEBUG_ADDRESSES
+ srm_printk("Decompressing the kernel...\n"
+ "...from 0x%lx to 0x%lx size 0x%x\n",
+ V_DATA_START,
+ uncompressed_image_start,
+ KERNEL_SIZE);
+#endif
+ decompress_kernel((void *)uncompressed_image_start,
+ (void *)V_DATA_START,
+ KERNEL_SIZE, KERNEL_Z_SIZE);
+
+ /*
+ * Now, move things to their final positions, if/as required.
+ */
+
+#ifdef INITRD_IMAGE_SIZE
+
+ /* First, we always move the INITRD image, if present. */
+#ifdef DEBUG_ADDRESSES
+ srm_printk("Moving the INITRD image...\n"
+ " from 0x%lx to 0x%lx size 0x%x\n",
+ V_INITRD_START,
+ initrd_image_start,
+ INITRD_IMAGE_SIZE);
+#endif
+ memcpy((void *)initrd_image_start, (void *)V_INITRD_START,
+ INITRD_IMAGE_SIZE);
+
+#endif /* INITRD_IMAGE_SIZE */
+
+ /* Next, we may have to move the uncompressed kernel to the
+ final destination.
+ */
+ if (must_move) {
+#ifdef DEBUG_ADDRESSES
+ srm_printk("Moving the uncompressed kernel...\n"
+ "...from 0x%lx to 0x%lx size 0x%x\n",
+ uncompressed_image_start,
+ K_KERNEL_IMAGE_START,
+ (unsigned)KERNEL_SIZE);
+#endif
+ /*
+ * Move the stack to a safe place to ensure it won't be
+ * overwritten by kernel image.
+ */
+ move_stack(initrd_image_start - PAGE_SIZE);
+
+ memcpy((void *)K_KERNEL_IMAGE_START,
+ (void *)uncompressed_image_start, KERNEL_SIZE);
+ }
+
+ /* Clear the zero page, then move the argument list in. */
+#ifdef DEBUG_LAST_STEPS
+ srm_printk("Preparing ZERO_PGE...\n");
+#endif
+ memset((char*)ZERO_PGE, 0, PAGE_SIZE);
+ strcpy((char*)ZERO_PGE, envval);
+
+#ifdef INITRD_IMAGE_SIZE
+
+#ifdef DEBUG_LAST_STEPS
+ srm_printk("Preparing INITRD info...\n");
+#endif
+ /* Finally, set the INITRD parameters for the kernel. */
+ ((long *)(ZERO_PGE+256))[0] = initrd_image_start;
+ ((long *)(ZERO_PGE+256))[1] = INITRD_IMAGE_SIZE;
+
+#endif /* INITRD_IMAGE_SIZE */
+
+#ifdef DEBUG_LAST_STEPS
+ srm_printk("Doing 'runkernel()'...\n");
+#endif
+ runkernel();
+}
diff --git a/arch/alpha/boot/head.S b/arch/alpha/boot/head.S
new file mode 100644
index 000000000000..f3d98089b3dc
--- /dev/null
+++ b/arch/alpha/boot/head.S
@@ -0,0 +1,123 @@
+/*
+ * arch/alpha/boot/head.S
+ *
+ * initial bootloader stuff..
+ */
+
+#include <asm/system.h>
+
+ .set noreorder
+ .globl __start
+ .ent __start
+__start:
+ br $29,2f
+2: ldgp $29,0($29)
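+ /* The br deposits the address of label 2 in $29; ldgp then
+ turns that PC value into this module's global pointer. */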
+ jsr $26,start_kernel
+ call_pal PAL_halt
+ .end __start
+
+ .align 5
+ .globl wrent
+ .ent wrent
+wrent:
+ .prologue 0
+ call_pal PAL_wrent
+ ret ($26)
+ .end wrent
+
+ .align 5
+ .globl wrkgp
+ .ent wrkgp
+wrkgp:
+ .prologue 0
+ call_pal PAL_wrkgp
+ ret ($26)
+ .end wrkgp
+
+ .align 5
+ .globl switch_to_osf_pal
+ .ent switch_to_osf_pal
+switch_to_osf_pal:
+ subq $30,128,$30
+ .frame $30,128,$26
+ stq $26,0($30)
+ stq $1,8($30)
+ stq $2,16($30)
+ stq $3,24($30)
+ stq $4,32($30)
+ stq $5,40($30)
+ stq $6,48($30)
+ stq $7,56($30)
+ stq $8,64($30)
+ stq $9,72($30)
+ stq $10,80($30)
+ stq $11,88($30)
+ stq $12,96($30)
+ stq $13,104($30)
+ stq $14,112($30)
+ stq $15,120($30)
+ .prologue 0
+
+ stq $30,0($17) /* save KSP in PCB */
+
+ bis $30,$30,$20 /* a4 = KSP */
+ br $17,1f
+
+ ldq $26,0($30)
+ ldq $1,8($30)
+ ldq $2,16($30)
+ ldq $3,24($30)
+ ldq $4,32($30)
+ ldq $5,40($30)
+ ldq $6,48($30)
+ ldq $7,56($30)
+ ldq $8,64($30)
+ ldq $9,72($30)
+ ldq $10,80($30)
+ ldq $11,88($30)
+ ldq $12,96($30)
+ ldq $13,104($30)
+ ldq $14,112($30)
+ ldq $15,120($30)
+ addq $30,128,$30
+ ret ($26)
+1: call_pal PAL_swppal
+ .end switch_to_osf_pal
+
+ .align 3
+ .globl tbi
+ .ent tbi
+tbi:
+ .prologue 0
+ call_pal PAL_tbi
+ ret ($26)
+ .end tbi
+
+ .align 3
+ .globl halt
+ .ent halt
+halt:
+ .prologue 0
+ call_pal PAL_halt
+ .end halt
+
+/* $16 - new stack page */
+ .align 3
+ .globl move_stack
+ .ent move_stack
+move_stack:
+ .prologue 0
+ lda $0, 0x1fff($31)
+ and $0, $30, $1 /* Stack offset */
+ or $1, $16, $16 /* New stack pointer */
+ mov $30, $1
+ mov $16, $2
+1: ldq $3, 0($1) /* Move the stack */
+ addq $1, 8, $1
+ stq $3, 0($2)
+ and $0, $1, $4
+ addq $2, 8, $2
+ bne $4, 1b
+ mov $16, $30
+ ret ($26)
+ .end move_stack
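+/*
+ * Editor's sketch (illustrative): in C terms, move_stack() is roughly
+ *
+ *	new_sp = new_page | (old_sp & 0x1fff);
+ *	copy quadwords from old_sp up to the next 8KB boundary;
+ *	sp = new_sp;
+ *
+ * i.e. the stack pointer keeps its offset within its 8KB page.
+ */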
diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c
new file mode 100644
index 000000000000..78c9b0b6eea7
--- /dev/null
+++ b/arch/alpha/boot/main.c
@@ -0,0 +1,191 @@
+/*
+ * arch/alpha/boot/main.c
+ *
+ * Copyright (C) 1994, 1995 Linus Torvalds
+ *
+ * This file is the bootloader for the Linux/AXP kernel
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/console.h>
+#include <asm/hwrpb.h>
+#include <asm/pgtable.h>
+
+#include <stdarg.h>
+
+#include "ksize.h"
+
+extern int vsprintf(char *, const char *, va_list);
+extern unsigned long switch_to_osf_pal(unsigned long nr,
+ struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+ unsigned long *vptb);
+struct hwrpb_struct *hwrpb = INIT_HWRPB;
+static struct pcb_struct pcb_va[1];
+
+/*
+ * Find the physical address of a virtual object.
+ *
+ * This is easy using the virtual page table address.
+ */
+
+static inline void *
+find_pa(unsigned long *vptb, void *ptr)
+{
+ unsigned long address = (unsigned long) ptr;
+ unsigned long result;
+
+ result = vptb[address >> 13];
+ result >>= 32;
+ result <<= 13;
+ result |= address & 0x1fff;
+ return (void *) result;
+}
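+
+#if 0
+/*
+ * Editor's sketch (illustrative, not in the original): how find_pa()
+ * decodes a PTE, assuming 8KB pages (13-bit offset) and the PFN held
+ * in the upper 32 bits of the PTE.  The address used is hypothetical.
+ */
+static void *
+find_pa_example(unsigned long *vptb)
+{
+	unsigned long va  = 0x20000000;		/* hypothetical VA */
+	unsigned long pte = vptb[va >> 13];	/* one PTE per 8KB page */
+	unsigned long pfn = pte >> 32;		/* PFN in the high bits */
+
+	return (void *)((pfn << 13) | (va & 0x1fff));
+}
+#endif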
+
+/*
+ * This function moves into OSF/1 pal-code, and has a temporary
+ * PCB for that. The kernel proper should replace this PCB with
+ * the real one as soon as possible.
+ *
+ * The page table muckery in here depends on the fact that the boot
+ * code has the L1 page table identity-map itself in the second PTE
+ * in the L1 page table. Thus the L1-page is virtually addressable
+ * itself (through three levels) at virtual address 0x200802000.
+ */
+
+#define VPTB ((unsigned long *) 0x200000000)
+#define L1 ((unsigned long *) 0x200802000)
+
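+#if 0
+/*
+ * Editor's note (illustrative): why the L1 page is virtually visible
+ * at 0x200802000.  With 8KB pages and 8-byte PTEs each level resolves
+ * 10 bits above the 13-bit page offset, and since L1[1] maps the L1
+ * page itself, index 1 at all three levels lands back on the L1 page:
+ */
+static const unsigned long l1_self_map_va =
+	(1UL << 33) | (1UL << 23) | (1UL << 13);	/* 0x200802000 */
+#endif
+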
+void
+pal_init(void)
+{
+ unsigned long i, rev;
+ struct percpu_struct * percpu;
+ struct pcb_struct * pcb_pa;
+
+ /* Create the dummy PCB. */
+ pcb_va->ksp = 0;
+ pcb_va->usp = 0;
+ pcb_va->ptbr = L1[1] >> 32;
+ pcb_va->asn = 0;
+ pcb_va->pcc = 0;
+ pcb_va->unique = 0;
+ pcb_va->flags = 1;
+ pcb_va->res1 = 0;
+ pcb_va->res2 = 0;
+ pcb_pa = find_pa(VPTB, pcb_va);
+
+ /*
+ * a0 = 2 (OSF)
+ * a1 = return address, but we give the asm the vaddr of the PCB
+ * a2 = physical addr of PCB
+ * a3 = new virtual page table pointer
+ * a4 = KSP (but the asm sets it)
+ */
+ srm_printk("Switching to OSF PAL-code .. ");
+
+ i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB);
+ if (i) {
+ srm_printk("failed, code %ld\n", i);
+ __halt();
+ }
+
+ percpu = (struct percpu_struct *)
+ (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB);
+ rev = percpu->pal_revision = percpu->palcode_avail[2];
+
+ srm_printk("Ok (rev %lx)\n", rev);
+
+ tbia(); /* do it directly in case we are SMP */
+}
+
+static inline long openboot(void)
+{
+ char bootdev[256];
+ long result;
+
+ result = callback_getenv(ENV_BOOTED_DEV, bootdev, 255);
+ if (result < 0)
+ return result;
+ return callback_open(bootdev, result & 255);
+}
+
+static inline long close(long dev)
+{
+ return callback_close(dev);
+}
+
+static inline long load(long dev, unsigned long addr, unsigned long count)
+{
+ char bootfile[256];
+ extern char _end;
+ long result, boot_size = &_end - (char *) BOOT_ADDR;
+
+ result = callback_getenv(ENV_BOOTED_FILE, bootfile, 255);
+ if (result < 0)
+ return result;
+ result &= 255;
+ bootfile[result] = '\0';
+ if (result)
+ srm_printk("Boot file specification (%s) not implemented\n",
+ bootfile);
+ return callback_read(dev, count, addr, boot_size/512 + 1);
+}
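+
+/*
+ * Editor's note (assumption): the last argument to callback_read()
+ * appears to be the starting logical block.  The primary bootblock
+ * records starting sector 1 for the loader (bb[61] in objstrip.c),
+ * so the kernel image would begin at block boot_size/512 + 1, right
+ * after the bootloader itself.
+ */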
+
+/*
+ * Start the kernel.
+ */
+static void runkernel(void)
+{
+ __asm__ __volatile__(
+ "bis %1,%1,$30\n\t"
+ "bis %0,%0,$26\n\t"
+ "ret ($26)"
+ : /* no outputs: it doesn't even return */
+ : "r" (START_ADDR),
+ "r" (PAGE_SIZE + INIT_STACK));
+}
+
+void start_kernel(void)
+{
+ long i;
+ long dev;
+ int nbytes;
+ char envval[256];
+
+ srm_printk("Linux/AXP bootloader for Linux " UTS_RELEASE "\n");
+ if (INIT_HWRPB->pagesize != 8192) {
+ srm_printk("Expected 8kB pages, got %ldkB\n", INIT_HWRPB->pagesize >> 10);
+ return;
+ }
+ pal_init();
+ dev = openboot();
+ if (dev < 0) {
+ srm_printk("Unable to open boot device: %016lx\n", dev);
+ return;
+ }
+ dev &= 0xffffffff;
+ srm_printk("Loading vmlinux ...");
+ i = load(dev, START_ADDR, KERNEL_SIZE);
+ close(dev);
+ if (i != KERNEL_SIZE) {
+ srm_printk("Failed (%lx)\n", i);
+ return;
+ }
+
+ nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval));
+ if (nbytes < 0) {
+ nbytes = 0;
+ }
+ envval[nbytes] = '\0';
+ strcpy((char*)ZERO_PGE, envval);
+
+ srm_printk(" Ok\nNow booting the kernel\n");
+ runkernel();
+ for (i = 0 ; i < 0x100000000 ; i++)
+ /* nothing */;
+ __halt();
+}
diff --git a/arch/alpha/boot/misc.c b/arch/alpha/boot/misc.c
new file mode 100644
index 000000000000..1d65adf5691e
--- /dev/null
+++ b/arch/alpha/boot/misc.c
@@ -0,0 +1,207 @@
+/*
+ * misc.c
+ *
+ * This is a collection of several routines from gzip-1.0.3
+ * adapted for Linux.
+ *
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ *
+ * Modified for ARM Linux by Russell King
+ *
+ * Nicolas Pitre <nico@visuaide.com> 1999/04/14 :
+ * For this code to run directly from Flash, all constant variables must
+ * be marked with 'const' and all other variables initialized at run-time
+ *  only. This way all non-constant variables will end up in the bss segment,
+ *  which resides in RAM and is cleared to 0 at start.
+ * This allows for a much quicker boot time.
+ *
+ * Modified for Alpha, from the ARM version, by Jay Estabrook 2003.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/uaccess.h>
+
+#define memzero(s,n) memset ((s),0,(n))
+#define puts srm_printk
+extern long srm_printk(const char *, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+/*
+ * gzip declarations
+ */
+#define OF(args) args
+#define STATIC static
+
+typedef unsigned char uch;
+typedef unsigned short ush;
+typedef unsigned long ulg;
+
+#define WSIZE 0x8000 /* Window size must be at least 32k, */
+ /* and a power of two */
+
+static uch *inbuf; /* input buffer */
+static uch *window; /* Sliding window buffer */
+
+static unsigned insize; /* valid bytes in inbuf */
+static unsigned inptr; /* index of next byte to be processed in inbuf */
+static unsigned outcnt; /* bytes in output buffer */
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
+
+#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# define Assert(cond,msg) {if(!(cond)) error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+static int fill_inbuf(void);
+static void flush_window(void);
+static void error(char *m);
+static void gzip_mark(void **);
+static void gzip_release(void **);
+
+static char *input_data;
+static int input_data_size;
+
+static uch *output_data;
+static ulg output_ptr;
+static ulg bytes_out;
+
+static void *malloc(int size);
+static void free(void *where);
+
+extern int end;
+static ulg free_mem_ptr;
+static ulg free_mem_ptr_end;
+
+#define HEAP_SIZE 0x2000
+
+#include "../../../lib/inflate.c"
+
+static void *malloc(int size)
+{
+ void *p;
+
+	if (size < 0) error("Malloc error");
+ if (free_mem_ptr <= 0) error("Memory error");
+
+ free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
+
+ p = (void *)free_mem_ptr;
+ free_mem_ptr += size;
+
+ if (free_mem_ptr >= free_mem_ptr_end)
+ error("Out of memory");
+ return p;
+}
+
+static void free(void *where)
+{ /* gzip_mark & gzip_release do the free */
+}
+
+static void gzip_mark(void **ptr)
+{
+ *ptr = (void *) free_mem_ptr;
+}
+
+static void gzip_release(void **ptr)
+{
+ free_mem_ptr = (long) *ptr;
+}
+
+/* ===========================================================================
+ * Fill the input buffer. This is called only when the buffer is empty
+ * and at least one byte is really needed.
+ */
+int fill_inbuf(void)
+{
+ if (insize != 0)
+ error("ran out of input data");
+
+ inbuf = input_data;
+ insize = input_data_size;
+
+ inptr = 1;
+ return inbuf[0];
+}
+
+/* ===========================================================================
+ * Write the output window, window[0..outcnt-1], and update crc and bytes_out.
+ * (Used for the decompressed data only.)
+ */
+void flush_window(void)
+{
+ ulg c = crc;
+ unsigned n;
+ uch *in, *out, ch;
+
+ in = window;
+ out = &output_data[output_ptr];
+ for (n = 0; n < outcnt; n++) {
+ ch = *out++ = *in++;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ output_ptr += (ulg)outcnt;
+ outcnt = 0;
+/* puts("."); */
+}
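+
+/*
+ * Editor's note: the loop above is the standard byte-at-a-time CRC-32
+ * update, c = crc_32_tab[(c ^ byte) & 0xff] ^ (c >> 8); the table and
+ * makecrc() come from the included lib/inflate.c.
+ */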
+
+static void error(char *x)
+{
+ puts("\n\n");
+ puts(x);
+ puts("\n\n -- System halted");
+
+ while(1); /* Halt */
+}
+
+unsigned int
+decompress_kernel(void *output_start,
+ void *input_start,
+ size_t ksize,
+ size_t kzsize)
+{
+ output_data = (uch *)output_start;
+ input_data = (uch *)input_start;
+ input_data_size = kzsize; /* use compressed size */
+
+ /* FIXME FIXME FIXME */
+ free_mem_ptr = (ulg)output_start + ksize;
+ free_mem_ptr_end = (ulg)output_start + ksize + 0x200000;
+ /* FIXME FIXME FIXME */
+
+ /* put in temp area to reduce initial footprint */
+ window = malloc(WSIZE);
+
+ makecrc();
+/* puts("Uncompressing Linux..."); */
+ gunzip();
+/* puts(" done, booting the kernel.\n"); */
+ return output_ptr;
+}
diff --git a/arch/alpha/boot/tools/mkbb.c b/arch/alpha/boot/tools/mkbb.c
new file mode 100644
index 000000000000..23c7190b047c
--- /dev/null
+++ b/arch/alpha/boot/tools/mkbb.c
@@ -0,0 +1,151 @@
+/* This utility makes a bootblock suitable for the SRM console/miniloader */
+
+/* Usage:
+ * mkbb <device> <lxboot>
+ *
+ * Where <device> is the name of the device to install the bootblock on,
+ * and <lxboot> is the name of a bootblock to merge in. This bootblock
+ * contains the offset and size of the bootloader. It must be exactly
+ * 512 bytes long.
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/* Minimal definition of disklabel, so we don't have to include
+ * asm/disklabel.h (confuses make)
+ */
+#ifndef MAXPARTITIONS
+#define MAXPARTITIONS 8 /* max. # of partitions */
+#endif
+
+#ifndef u8
+#define u8 unsigned char
+#endif
+
+#ifndef u16
+#define u16 unsigned short
+#endif
+
+#ifndef u32
+#define u32 unsigned int
+#endif
+
+struct disklabel {
+ u32 d_magic; /* must be DISKLABELMAGIC */
+ u16 d_type, d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ u32 d_secsize;
+ u32 d_nsectors;
+ u32 d_ntracks;
+ u32 d_ncylinders;
+ u32 d_secpercyl;
+ u32 d_secprtunit;
+ u16 d_sparespertrack;
+ u16 d_sparespercyl;
+ u32 d_acylinders;
+ u16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ u32 d_headswitch, d_trkseek, d_flags;
+ u32 d_drivedata[5];
+ u32 d_spare[5];
+ u32 d_magic2; /* must be DISKLABELMAGIC */
+ u16 d_checksum;
+ u16 d_npartitions;
+ u32 d_bbsize, d_sbsize;
+ struct d_partition {
+ u32 p_size;
+ u32 p_offset;
+ u32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ u16 p_cpg;
+ } d_partitions[MAXPARTITIONS];
+};
+
+
+typedef union __bootblock {
+ struct {
+ char __pad1[64];
+ struct disklabel __label;
+ } __u1;
+ struct {
+ unsigned long __pad2[63];
+ unsigned long __checksum;
+ } __u2;
+ char bootblock_bytes[512];
+ unsigned long bootblock_quadwords[64];
+} bootblock;
+
+#define bootblock_label __u1.__label
+#define bootblock_checksum __u2.__checksum
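+
+/*
+ * Editor's note (illustrative): the SRM bootblock is 512 bytes, i.e.
+ * 64 quadwords.  Quadwords 0..62 are summed and the result is stored
+ * in quadword 63, the checksum that the console verifies at boot.
+ */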
+
+int main(int argc, char ** argv)
+{
+ bootblock bootblock_from_disk;
+ bootblock bootloader_image;
+ int dev, fd;
+ int i;
+ int nread;
+
+ /* Make sure of the arg count */
+ if(argc != 3) {
+ fprintf(stderr, "Usage: %s device lxboot\n", argv[0]);
+		exit(1);
+ }
+
+ /* First, open the device and make sure it's accessible */
+ dev = open(argv[1], O_RDWR);
+ if(dev < 0) {
+ perror(argv[1]);
+		exit(1);
+ }
+
+ /* Now open the lxboot and make sure it's reasonable */
+ fd = open(argv[2], O_RDONLY);
+ if(fd < 0) {
+ perror(argv[2]);
+ close(dev);
+		exit(1);
+ }
+
+ /* Read in the lxboot */
+ nread = read(fd, &bootloader_image, sizeof(bootblock));
+ if(nread != sizeof(bootblock)) {
+ perror("lxboot read");
+		fprintf(stderr, "expected %d, got %d\n",
+			(int) sizeof(bootblock), nread);
+		exit(1);
+ }
+
+ /* Read in the bootblock from disk. */
+ nread = read(dev, &bootblock_from_disk, sizeof(bootblock));
+ if(nread != sizeof(bootblock)) {
+ perror("bootblock read");
+		fprintf(stderr, "expected %d, got %d\n",
+			(int) sizeof(bootblock), nread);
+		exit(1);
+ }
+
+ /* Swap the bootblock's disklabel into the bootloader */
+ bootloader_image.bootblock_label = bootblock_from_disk.bootblock_label;
+
+ /* Calculate the bootblock checksum */
+ bootloader_image.bootblock_checksum = 0;
+ for(i = 0; i < 63; i++) {
+ bootloader_image.bootblock_checksum +=
+ bootloader_image.bootblock_quadwords[i];
+ }
+
+ /* Write the whole thing out! */
+ lseek(dev, 0L, SEEK_SET);
+ if(write(dev, &bootloader_image, sizeof(bootblock)) != sizeof(bootblock)) {
+ perror("bootblock write");
+		exit(1);
+ }
+
+ close(fd);
+ close(dev);
+ exit(0);
+}
diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c
new file mode 100644
index 000000000000..67beb1b45e4f
--- /dev/null
+++ b/arch/alpha/boot/tools/objstrip.c
@@ -0,0 +1,281 @@
+/*
+ * arch/alpha/boot/tools/objstrip.c
+ *
+ * Strip the object file headers/trailers from an executable (ELF or ECOFF).
+ *
+ * Copyright (C) 1996 David Mosberger-Tang.
+ */
+/*
+ * Converts an ECOFF or ELF object file into a bootable file. The
+ * object file must be an OMAGIC file (i.e., data and bss follow immediately
+ * behind the text). See DEC "Assembly Language Programmer's Guide"
+ * documentation for details. The SRM boot process is documented in
+ * the Alpha AXP Architecture Reference Manual, Second Edition by
+ * Richard L. Sites and Richard T. Witek.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <sys/fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <linux/a.out.h>
+#include <linux/coff.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#ifdef __ELF__
+# include <linux/elf.h>
+#endif
+
+/* bootfile size must be multiple of BLOCK_SIZE: */
+#define BLOCK_SIZE 512
+
+const char * prog_name;
+
+
+void
+usage (void)
+{
+ fprintf(stderr,
+ "usage: %s [-v] -p file primary\n"
+ " %s [-vb] file [secondary]\n", prog_name, prog_name);
+ exit(1);
+}
+
+
+int
+main (int argc, char *argv[])
+{
+ size_t nwritten, tocopy, n, mem_size, fil_size, pad = 0;
+ int fd, ofd, i, j, verbose = 0, primary = 0;
+ char buf[8192], *inname;
+ struct exec * aout; /* includes file & aout header */
+ long offset;
+#ifdef __ELF__
+ struct elfhdr *elf;
+ struct elf_phdr *elf_phdr; /* program header */
+ unsigned long long e_entry;
+#endif
+
+ prog_name = argv[0];
+
+ for (i = 1; i < argc && argv[i][0] == '-'; ++i) {
+ for (j = 1; argv[i][j]; ++j) {
+ switch (argv[i][j]) {
+ case 'v':
+ verbose = ~verbose;
+ break;
+
+ case 'b':
+ pad = BLOCK_SIZE;
+ break;
+
+ case 'p':
+ primary = 1; /* make primary bootblock */
+ break;
+ }
+ }
+ }
+
+ if (i >= argc) {
+ usage();
+ }
+ inname = argv[i++];
+
+ fd = open(inname, O_RDONLY);
+ if (fd == -1) {
+ perror("open");
+ exit(1);
+ }
+
+ ofd = 1;
+ if (i < argc) {
+ ofd = open(argv[i++], O_WRONLY | O_CREAT | O_TRUNC, 0666);
+	if (ofd == -1) {
+ perror("open");
+ exit(1);
+ }
+ }
+
+ if (primary) {
+ /* generate bootblock for primary loader */
+
+ unsigned long bb[64], sum = 0;
+ struct stat st;
+ off_t size;
+ int i;
+
+ if (ofd == 1) {
+ usage();
+ }
+
+ if (fstat(fd, &st) == -1) {
+ perror("fstat");
+ exit(1);
+ }
+
+ size = (st.st_size + BLOCK_SIZE - 1) & ~(BLOCK_SIZE - 1);
+ memset(bb, 0, sizeof(bb));
+ strcpy((char *) bb, "Linux SRM bootblock");
+ bb[60] = size / BLOCK_SIZE; /* count */
+ bb[61] = 1; /* starting sector # */
+ bb[62] = 0; /* flags---must be 0 */
+ for (i = 0; i < 63; ++i) {
+ sum += bb[i];
+ }
+ bb[63] = sum;
+ if (write(ofd, bb, sizeof(bb)) != sizeof(bb)) {
+ perror("boot-block write");
+ exit(1);
+ }
+ printf("%lu\n", size);
+ return 0;
+ }
+
+ /* read and inspect exec header: */
+
+ if (read(fd, buf, sizeof(buf)) < 0) {
+ perror("read");
+ exit(1);
+ }
+
+#ifdef __ELF__
+ elf = (struct elfhdr *) buf;
+
+ if (elf->e_ident[0] == 0x7f && strncmp(elf->e_ident + 1, "ELF", 3) == 0) {
+ if (elf->e_type != ET_EXEC) {
+ fprintf(stderr, "%s: %s is not an ELF executable\n",
+ prog_name, inname);
+ exit(1);
+ }
+ if (!elf_check_arch(elf)) {
+	    fprintf(stderr, "%s: %s is not for this processor (e_machine=%d)\n",
+		    prog_name, inname, elf->e_machine);
+ exit(1);
+ }
+ if (elf->e_phnum != 1) {
+ fprintf(stderr,
+ "%s: %d program headers (forgot to link with -N?)\n",
+ prog_name, elf->e_phnum);
+ }
+
+ e_entry = elf->e_entry;
+
+ lseek(fd, elf->e_phoff, SEEK_SET);
+ if (read(fd, buf, sizeof(*elf_phdr)) != sizeof(*elf_phdr)) {
+ perror("read");
+ exit(1);
+ }
+
+ elf_phdr = (struct elf_phdr *) buf;
+ offset = elf_phdr->p_offset;
+ mem_size = elf_phdr->p_memsz;
+ fil_size = elf_phdr->p_filesz;
+
+ /* work around ELF bug: */
+ if (elf_phdr->p_vaddr < e_entry) {
+ unsigned long delta = e_entry - elf_phdr->p_vaddr;
+ offset += delta;
+ mem_size -= delta;
+ fil_size -= delta;
+ elf_phdr->p_vaddr += delta;
+ }
+
+ if (verbose) {
+ fprintf(stderr, "%s: extracting %#016lx-%#016lx (at %lx)\n",
+ prog_name, (long) elf_phdr->p_vaddr,
+ elf_phdr->p_vaddr + fil_size, offset);
+ }
+ } else
+#endif
+ {
+ aout = (struct exec *) buf;
+
+ if (!(aout->fh.f_flags & COFF_F_EXEC)) {
+ fprintf(stderr, "%s: %s is not in executable format\n",
+ prog_name, inname);
+ exit(1);
+ }
+
+ if (aout->fh.f_opthdr != sizeof(aout->ah)) {
+ fprintf(stderr, "%s: %s has unexpected optional header size\n",
+ prog_name, inname);
+ exit(1);
+ }
+
+ if (N_MAGIC(*aout) != OMAGIC) {
+ fprintf(stderr, "%s: %s is not an OMAGIC file\n",
+ prog_name, inname);
+ exit(1);
+ }
+ offset = N_TXTOFF(*aout);
+ fil_size = aout->ah.tsize + aout->ah.dsize;
+ mem_size = fil_size + aout->ah.bsize;
+
+ if (verbose) {
+ fprintf(stderr, "%s: extracting %#016lx-%#016lx (at %lx)\n",
+ prog_name, aout->ah.text_start,
+ aout->ah.text_start + fil_size, offset);
+ }
+ }
+
+ if (lseek(fd, offset, SEEK_SET) != offset) {
+ perror("lseek");
+ exit(1);
+ }
+
+ if (verbose) {
+		fprintf(stderr, "%s: copying %lu bytes from %s\n",
+ prog_name, (unsigned long) fil_size, inname);
+ }
+
+ tocopy = fil_size;
+ while (tocopy > 0) {
+ n = tocopy;
+ if (n > sizeof(buf)) {
+ n = sizeof(buf);
+ }
+ tocopy -= n;
+ if ((size_t) read(fd, buf, n) != n) {
+ perror("read");
+ exit(1);
+ }
+ do {
+ nwritten = write(ofd, buf, n);
+ if ((ssize_t) nwritten == -1) {
+ perror("write");
+ exit(1);
+ }
+ n -= nwritten;
+ } while (n > 0);
+ }
+
+ if (pad) {
+ mem_size = ((mem_size + pad - 1) / pad) * pad;
+ }
+
+ tocopy = mem_size - fil_size;
+ if (tocopy > 0) {
+ fprintf(stderr,
+ "%s: zero-filling bss and aligning to %lu with %lu bytes\n",
+ prog_name, pad, (unsigned long) tocopy);
+
+ memset(buf, 0x00, sizeof(buf));
+ do {
+ n = tocopy;
+ if (n > sizeof(buf)) {
+ n = sizeof(buf);
+ }
+ nwritten = write(ofd, buf, n);
+ if ((ssize_t) nwritten == -1) {
+ perror("write");
+ exit(1);
+ }
+ tocopy -= nwritten;
+ } while (tocopy > 0);
+ }
+ return 0;
+}
diff --git a/arch/alpha/defconfig b/arch/alpha/defconfig
new file mode 100644
index 000000000000..5e39b7a7c8f4
--- /dev/null
+++ b/arch/alpha/defconfig
@@ -0,0 +1,927 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.9-rc2
+# Sat Sep 25 15:38:35 2004
+#
+CONFIG_ALPHA=y
+CONFIG_64BIT=y
+CONFIG_MMU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_GENERIC_IOMAP is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# System setup
+#
+CONFIG_ALPHA_GENERIC=y
+# CONFIG_ALPHA_ALCOR is not set
+# CONFIG_ALPHA_XL is not set
+# CONFIG_ALPHA_BOOK1 is not set
+# CONFIG_ALPHA_AVANTI_CH is not set
+# CONFIG_ALPHA_CABRIOLET is not set
+# CONFIG_ALPHA_DP264 is not set
+# CONFIG_ALPHA_EB164 is not set
+# CONFIG_ALPHA_EB64P_CH is not set
+# CONFIG_ALPHA_EB66 is not set
+# CONFIG_ALPHA_EB66P is not set
+# CONFIG_ALPHA_EIGER is not set
+# CONFIG_ALPHA_JENSEN is not set
+# CONFIG_ALPHA_LX164 is not set
+# CONFIG_ALPHA_LYNX is not set
+# CONFIG_ALPHA_MARVEL is not set
+# CONFIG_ALPHA_MIATA is not set
+# CONFIG_ALPHA_MIKASA is not set
+# CONFIG_ALPHA_NAUTILUS is not set
+# CONFIG_ALPHA_NONAME_CH is not set
+# CONFIG_ALPHA_NORITAKE is not set
+# CONFIG_ALPHA_PC164 is not set
+# CONFIG_ALPHA_P2K is not set
+# CONFIG_ALPHA_RAWHIDE is not set
+# CONFIG_ALPHA_RUFFIAN is not set
+# CONFIG_ALPHA_RX164 is not set
+# CONFIG_ALPHA_SX164 is not set
+# CONFIG_ALPHA_SABLE is not set
+# CONFIG_ALPHA_SHARK is not set
+# CONFIG_ALPHA_TAKARA is not set
+# CONFIG_ALPHA_TITAN is not set
+# CONFIG_ALPHA_WILDFIRE is not set
+CONFIG_ISA=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_ALPHA_CORE_AGP=y
+CONFIG_ALPHA_BROKEN_IRQ_MASK=y
+CONFIG_EISA=y
+# CONFIG_SMP is not set
+# CONFIG_DISCONTIGMEM is not set
+CONFIG_VERBOSE_MCHECK=y
+CONFIG_VERBOSE_MCHECK_ON=1
+CONFIG_PCI_LEGACY_PROC=y
+CONFIG_PCI_NAMES=y
+CONFIG_EISA_PCI_EISA=y
+CONFIG_EISA_VIRTUAL_ROOT=y
+CONFIG_EISA_NAMES=y
+CONFIG_SRM_ENV=m
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_EM86 is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG is not set
+
+#
+# Protocols
+#
+CONFIG_ISAPNP=y
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_RAM is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_IDE_MAX_HWIFS=4
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+CONFIG_IDEDISK_MULTI_MODE=y
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+# CONFIG_IDE_TASKFILE_IO is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_IDEPNP is not set
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+CONFIG_BLK_DEV_ALI15X3=y
+# CONFIG_WDC_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+CONFIG_BLK_DEV_CMD64X=y
+# CONFIG_BLK_DEV_TRIFLEX is not set
+CONFIG_BLK_DEV_CY82C693=y
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_IDE_ARM is not set
+# CONFIG_IDE_CHIPSETS is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AACRAID is not set
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
+CONFIG_AIC7XXX_RESET_DELAY_MS=5000
+# CONFIG_AIC7XXX_PROBE_EISA_VL is not set
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA6322 is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_AMANDA is not set
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_LIMIT is not set
+# CONFIG_IP_NF_MATCH_IPRANGE is not set
+# CONFIG_IP_NF_MATCH_MAC is not set
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+# CONFIG_IP_NF_MATCH_MARK is not set
+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_TCPMSS is not set
+# CONFIG_IP_NF_MATCH_HELPER is not set
+# CONFIG_IP_NF_MATCH_STATE is not set
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_REALM is not set
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_FILTER=m
+# CONFIG_IP_NF_TARGET_REJECT is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_SAME is not set
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+CONFIG_IP_NF_COMPAT_IPCHAINS=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+# CONFIG_NET_SB1000 is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL1 is not set
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_EL16 is not set
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+CONFIG_VORTEX=y
+# CONFIG_TYPHOON is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+CONFIG_TULIP_MMIO=y
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_AC3200 is not set
+# CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_E100 is not set
+# CONFIG_LNE390 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_NE3210 is not set
+# CONFIG_ES3210 is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+CONFIG_YELLOWFIN=y
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=m
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+# CONFIG_JOLIET is not set
+# CONFIG_ZISOFS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_OSF_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_RWLOCK is not set
+# CONFIG_DEBUG_SEMAPHORE is not set
+CONFIG_ALPHA_LEGACY_START_ADDRESS=y
+CONFIG_MATHEMU=y
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WHIRLPOOL is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
new file mode 100644
index 000000000000..ab6fa54b3860
--- /dev/null
+++ b/arch/alpha/kernel/Makefile
@@ -0,0 +1,104 @@
+#
+# Makefile for the linux kernel.
+#
+
+extra-y := head.o vmlinux.lds
+EXTRA_AFLAGS := $(CFLAGS)
+EXTRA_CFLAGS := -Werror -Wno-sign-compare
+
+obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
+ irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+ alpha_ksyms.o systbls.o err_common.o io.o
+
+obj-$(CONFIG_VGA_HOSE) += console.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_PCI) += pci.o pci_iommu.o
+obj-$(CONFIG_SRM_ENV) += srm_env.o
+obj-$(CONFIG_MODULES) += module.o
+
+ifdef CONFIG_ALPHA_GENERIC
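+# Editor's note (illustrative): with CONFIG_ALPHA_GENERIC all the
+# core-logic and system files below are built into one kernel image
+# that can boot on any supported platform; otherwise only the files
+# for the configured board are compiled.
+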
+
+obj-y += core_apecs.o core_cia.o core_irongate.o core_lca.o \
+ core_mcpcia.o core_polaris.o core_t2.o \
+ core_tsunami.o
+
+obj-y += sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o sys_eiger.o \
+ sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \
+ sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
+ sys_sable.o sys_sio.o sys_sx164.o sys_takara.o
+
+ifndef CONFIG_ALPHA_LEGACY_START_ADDRESS
+obj-y += core_marvel.o core_titan.o core_wildfire.o
+obj-y += sys_marvel.o sys_titan.o sys_wildfire.o
+obj-y += err_ev7.o err_titan.o err_marvel.o
+endif
+
+obj-y += irq_pyxis.o irq_i8259.o irq_srm.o
+obj-y += err_ev6.o
+obj-y += es1888.o smc37c669.o smc37c93x.o ns87312.o gct.o
+obj-y += srmcons.o
+
+else
+
+# Misc support
+obj-$(CONFIG_ALPHA_SRM) += srmcons.o
+
+# Core logic support
+obj-$(CONFIG_ALPHA_APECS) += core_apecs.o
+obj-$(CONFIG_ALPHA_CIA) += core_cia.o
+obj-$(CONFIG_ALPHA_IRONGATE) += core_irongate.o
+obj-$(CONFIG_ALPHA_LCA) += core_lca.o
+obj-$(CONFIG_ALPHA_MARVEL) += core_marvel.o gct.o
+obj-$(CONFIG_ALPHA_MCPCIA) += core_mcpcia.o
+obj-$(CONFIG_ALPHA_POLARIS) += core_polaris.o
+obj-$(CONFIG_ALPHA_T2) += core_t2.o
+obj-$(CONFIG_ALPHA_TSUNAMI) += core_tsunami.o
+obj-$(CONFIG_ALPHA_TITAN) += core_titan.o
+obj-$(CONFIG_ALPHA_WILDFIRE) += core_wildfire.o
+
+# Board support
+obj-$(CONFIG_ALPHA_ALCOR) += sys_alcor.o irq_i8259.o irq_srm.o
+obj-$(CONFIG_ALPHA_CABRIOLET) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ ns87312.o
+obj-$(CONFIG_ALPHA_EB164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ ns87312.o
+obj-$(CONFIG_ALPHA_EB66P) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ ns87312.o
+obj-$(CONFIG_ALPHA_LX164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ smc37c93x.o
+obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ smc37c93x.o
+obj-$(CONFIG_ALPHA_DP264) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
+obj-$(CONFIG_ALPHA_SHARK) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
+obj-$(CONFIG_ALPHA_TITAN) += sys_titan.o irq_i8259.o smc37c669.o
+obj-$(CONFIG_ALPHA_EB64P) += sys_eb64p.o irq_i8259.o
+obj-$(CONFIG_ALPHA_EB66) += sys_eb64p.o irq_i8259.o
+obj-$(CONFIG_ALPHA_EIGER) += sys_eiger.o irq_i8259.o
+obj-$(CONFIG_ALPHA_JENSEN) += sys_jensen.o pci-noop.o irq_i8259.o
+obj-$(CONFIG_ALPHA_MARVEL) += sys_marvel.o
+obj-$(CONFIG_ALPHA_MIATA) += sys_miata.o irq_pyxis.o irq_i8259.o \
+ es1888.o smc37c669.o
+obj-$(CONFIG_ALPHA_MIKASA) += sys_mikasa.o irq_i8259.o irq_srm.o
+obj-$(CONFIG_ALPHA_NAUTILUS) += sys_nautilus.o irq_i8259.o irq_srm.o
+obj-$(CONFIG_ALPHA_NORITAKE) += sys_noritake.o irq_i8259.o
+obj-$(CONFIG_ALPHA_RAWHIDE) += sys_rawhide.o irq_i8259.o
+obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o
+obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o
+obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o
+obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o
+obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o irq_pyxis.o irq_i8259.o \
+ irq_srm.o smc37c669.o
+obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o ns87312.o
+obj-$(CONFIG_ALPHA_WILDFIRE) += sys_wildfire.o irq_i8259.o
+
+# Error support
+obj-$(CONFIG_ALPHA_MARVEL) += err_ev7.o err_marvel.o
+obj-$(CONFIG_ALPHA_NAUTILUS) += err_ev6.o
+obj-$(CONFIG_ALPHA_TITAN) += err_ev6.o err_titan.o
+
+endif # GENERIC
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
new file mode 100644
index 000000000000..fc5ef90c4fc9
--- /dev/null
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -0,0 +1,235 @@
+/*
+ * linux/arch/alpha/kernel/alpha_ksyms.c
+ *
+ * Export the alpha-specific functions that are needed for loadable
+ * modules.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/socket.h>
+#include <linux/syscalls.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/pci.h>
+#include <linux/tty.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/console.h>
+#include <asm/hwrpb.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/checksum.h>
+#include <linux/interrupt.h>
+#include <asm/fpu.h>
+#include <asm/irq.h>
+#include <asm/machvec.h>
+#include <asm/pgalloc.h>
+#include <asm/semaphore.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/vga.h>
+
+#define __KERNEL_SYSCALLS__
+#include <asm/unistd.h>
+
+extern struct hwrpb_struct *hwrpb;
+extern void dump_thread(struct pt_regs *, struct user *);
+extern spinlock_t rtc_lock;
+
+/* these are C runtime functions with special calling conventions: */
+extern void __divl (void);
+extern void __reml (void);
+extern void __divq (void);
+extern void __remq (void);
+extern void __divlu (void);
+extern void __remlu (void);
+extern void __divqu (void);
+extern void __remqu (void);
+
+EXPORT_SYMBOL(alpha_mv);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(probe_irq_mask);
+EXPORT_SYMBOL(screen_info);
+EXPORT_SYMBOL(perf_irq);
+EXPORT_SYMBOL(callback_getenv);
+EXPORT_SYMBOL(callback_setenv);
+EXPORT_SYMBOL(callback_save_env);
+#ifdef CONFIG_ALPHA_GENERIC
+EXPORT_SYMBOL(alpha_using_srm);
+#endif /* CONFIG_ALPHA_GENERIC */
+
+/* platform dependent support */
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memsetw);
+EXPORT_SYMBOL(__constant_c_memset);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
+
+EXPORT_SYMBOL(__direct_map_base);
+EXPORT_SYMBOL(__direct_map_size);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_map_single);
+EXPORT_SYMBOL(pci_map_page);
+EXPORT_SYMBOL(pci_unmap_single);
+EXPORT_SYMBOL(pci_unmap_page);
+EXPORT_SYMBOL(pci_map_sg);
+EXPORT_SYMBOL(pci_unmap_sg);
+EXPORT_SYMBOL(pci_dma_supported);
+EXPORT_SYMBOL(pci_dac_dma_supported);
+EXPORT_SYMBOL(pci_dac_page_to_dma);
+EXPORT_SYMBOL(pci_dac_dma_to_page);
+EXPORT_SYMBOL(pci_dac_dma_to_offset);
+EXPORT_SYMBOL(alpha_gendev_to_pci);
+#endif
+EXPORT_SYMBOL(dma_set_mask);
+
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_elf_thread);
+EXPORT_SYMBOL(dump_elf_task);
+EXPORT_SYMBOL(dump_elf_task_fp);
+EXPORT_SYMBOL(hwrpb);
+EXPORT_SYMBOL(start_thread);
+EXPORT_SYMBOL(alpha_read_fp_reg);
+EXPORT_SYMBOL(alpha_read_fp_reg_s);
+EXPORT_SYMBOL(alpha_write_fp_reg);
+EXPORT_SYMBOL(alpha_write_fp_reg_s);
+
+/* In-kernel system calls. */
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(sys_open);
+EXPORT_SYMBOL(sys_dup);
+EXPORT_SYMBOL(sys_exit);
+EXPORT_SYMBOL(sys_write);
+EXPORT_SYMBOL(sys_read);
+EXPORT_SYMBOL(sys_lseek);
+EXPORT_SYMBOL(execve);
+EXPORT_SYMBOL(sys_setsid);
+EXPORT_SYMBOL(sys_wait4);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_tcpudp_magic);
+EXPORT_SYMBOL(ip_compute_csum);
+EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_ipv6_magic);
+
+#ifdef CONFIG_MATHEMU_MODULE
+extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
+extern long (*alpha_fp_emul) (unsigned long pc);
+EXPORT_SYMBOL(alpha_fp_emul_imprecise);
+EXPORT_SYMBOL(alpha_fp_emul);
+#endif
+
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+EXPORT_SYMBOL(__min_ipl);
+#endif
+
+/*
+ * The following are specially called from the uaccess assembly stubs.
+ */
+EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__do_clear_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+
+/* Semaphore helper functions. */
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__up_wakeup);
+EXPORT_SYMBOL(down);
+EXPORT_SYMBOL(down_interruptible);
+EXPORT_SYMBOL(down_trylock);
+EXPORT_SYMBOL(up);
+
+/*
+ * SMP-specific symbols.
+ */
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(flush_tlb_mm);
+EXPORT_SYMBOL(flush_tlb_range);
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(smp_imb);
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(smp_num_cpus);
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_call_function_on_cpu);
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#ifdef CONFIG_DEBUG_SPINLOCK
+EXPORT_SYMBOL(_raw_spin_unlock);
+EXPORT_SYMBOL(debug_spin_lock);
+EXPORT_SYMBOL(debug_spin_trylock);
+#endif
+#ifdef CONFIG_DEBUG_RWLOCK
+EXPORT_SYMBOL(_raw_write_lock);
+EXPORT_SYMBOL(_raw_read_lock);
+#endif
+EXPORT_SYMBOL(cpu_present_mask);
+#endif /* CONFIG_SMP */
+
+/*
+ * NUMA specific symbols
+ */
+#ifdef CONFIG_DISCONTIGMEM
+EXPORT_SYMBOL(node_data);
+#endif /* CONFIG_DISCONTIGMEM */
+
+EXPORT_SYMBOL(rtc_lock);
+
+/*
+ * The following are special because they're not called
+ * explicitly (the C compiler or assembler generates them in
+ * response to division operations). Fortunately, their
+ * interface isn't going to change any time soon, so it's OK
+ * to leave it out of version control.
+ */
+# undef memcpy
+# undef memset
+EXPORT_SYMBOL(__divl);
+EXPORT_SYMBOL(__divlu);
+EXPORT_SYMBOL(__divq);
+EXPORT_SYMBOL(__divqu);
+EXPORT_SYMBOL(__reml);
+EXPORT_SYMBOL(__remlu);
+EXPORT_SYMBOL(__remq);
+EXPORT_SYMBOL(__remqu);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memchr);
+
+EXPORT_SYMBOL(get_wchan);
+
+#ifdef CONFIG_ALPHA_IRONGATE
+EXPORT_SYMBOL(irongate_ioremap);
+EXPORT_SYMBOL(irongate_iounmap);
+#endif
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
new file mode 100644
index 000000000000..8f2e5c718b50
--- /dev/null
+++ b/arch/alpha/kernel/asm-offsets.c
@@ -0,0 +1,43 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <asm/io.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
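+
+/*
+ * Editor's note (illustrative): each DEFINE() makes the compiler emit
+ * an assembly line of the form
+ *
+ *	->TI_TASK 16 offsetof(struct thread_info, task)
+ *
+ * which the build post-processes into "#define TI_TASK 16" for use
+ * from assembly code.  The offset value shown is hypothetical.
+ */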
+
+void foo(void)
+{
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ BLANK();
+
+ DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+ DEFINE(TASK_UID, offsetof(struct task_struct, uid));
+ DEFINE(TASK_EUID, offsetof(struct task_struct, euid));
+ DEFINE(TASK_GID, offsetof(struct task_struct, gid));
+ DEFINE(TASK_EGID, offsetof(struct task_struct, egid));
+ DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
+ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+ BLANK();
+
+ DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
+ DEFINE(PT_PTRACED, PT_PTRACED);
+ DEFINE(CLONE_VM, CLONE_VM);
+ DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
+ DEFINE(SIGCHLD, SIGCHLD);
+ BLANK();
+
+ DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
+ DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
+}
diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c
new file mode 100644
index 000000000000..cb3e739fbad8
--- /dev/null
+++ b/arch/alpha/kernel/console.c
@@ -0,0 +1,66 @@
+/*
+ * linux/arch/alpha/kernel/console.c
+ *
+ * Architecture-specific support for the VGA device on a
+ * non-zero I/O hose.
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <asm/vga.h>
+#include <asm/machvec.h>
+
+#ifdef CONFIG_VGA_HOSE
+
+/*
+ * Externally-visible vga hose bases
+ */
+unsigned long __vga_hose_io_base = 0; /* base for default hose */
+unsigned long __vga_hose_mem_base = 0; /* base for default hose */
+
+static struct pci_controller * __init
+default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2)
+{
+ if (h2->index < h1->index)
+ return h2;
+
+ return h1;
+}
+
+void __init
+set_vga_hose(struct pci_controller *hose)
+{
+ if (hose) {
+ __vga_hose_io_base = hose->io_space->start;
+ __vga_hose_mem_base = hose->mem_space->start;
+ }
+}
+
+void __init
+locate_and_init_vga(void *(*sel_func)(void *, void *))
+{
+ struct pci_controller *hose = NULL;
+ struct pci_dev *dev = NULL;
+
+	if (!sel_func)
+		sel_func = (void *)default_vga_hose_select;
+
+	while ((dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev)) != NULL) {
+		if (!hose)
+			hose = dev->sysdata;
+		else
+			hose = sel_func(hose, dev->sysdata);
+	}
+
+	/* No VGA device found?  Leave the console alone. */
+	if (!hose)
+		return;
+
+	/* Did we already initialize the correct one? */
+ if (conswitchp == &vga_con &&
+ __vga_hose_io_base == hose->io_space->start &&
+ __vga_hose_mem_base == hose->mem_space->start)
+ return;
+
+ /* Set the VGA hose and init the new console */
+ set_vga_hose(hose);
+ take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
+}
+
+#endif
diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c
new file mode 100644
index 000000000000..a27ba12ba35e
--- /dev/null
+++ b/arch/alpha/kernel/core_apecs.c
@@ -0,0 +1,418 @@
+/*
+ * linux/arch/alpha/kernel/core_apecs.c
+ *
+ * Rewritten for APECS from lca.c, which was:
+ *
+ * Written by David Mosberger (davidm@cs.arizona.edu) with some code
+ * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
+ * bios code.
+ *
+ * Code common to all APECS core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_apecs.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBGC(args) printk args
+#else
+# define DBGC(args)
+#endif
+
+#define vuip volatile unsigned int *
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address and setup the APECS_HAXR2 register
+ * accordingly. It is therefore not safe to have concurrent
+ * invocations to configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:11 Device select bit.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
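+/*
+ * A worked example with illustrative values: bus 0, device 7,
+ * function 0, register 0x10 gives device_fn = (7 << 3) | 0 = 0x38,
+ * so mk_conf_addr() below yields pci_addr = (0x38 << 8) | 0x10
+ * = 0x3810.  The config space routines then form the sparse-space
+ * address as (pci_addr << 5) + length-encoding + APECS_CONF.
+ */
+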
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+	DBGC(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x,"
+ " pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (bus == 0) {
+ int device = device_fn >> 3;
+
+ /* type 0 configuration cycle: */
+
+ if (device > 20) {
+ DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
+ device));
+ return -1;
+ }
+
+ *type1 = 0;
+ addr = (device_fn << 8) | (where);
+ } else {
+ /* type 1 configuration cycle: */
+ *type1 = 1;
+ addr = (bus << 16) | (device_fn << 8) | (where);
+ }
+ *pci_addr = addr;
+ DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1)
+{
+ unsigned long flags;
+ unsigned int stat0, value;
+ unsigned int haxr2 = 0;
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+ DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+ DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0));
+
+ /* If Type1 access, must set HAE #2. */
+ if (type1) {
+ haxr2 = *(vuip)APECS_IOC_HAXR2;
+ mb();
+ *(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
+ DBGC(("conf_read: TYPE1 access\n"));
+ }
+
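+	/*
+	 * Probe protocol: drain outstanding writes, tell the CPU 0
+	 * machine-check handler that a mcheck may be expected, do the
+	 * access, then test whether one was actually taken -- a taken
+	 * mcheck means no device answered, so we return all-ones.
+	 */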
+ draina();
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+
+ /* Access configuration space. */
+
+ /* Some SRMs step on these registers during a machine check. */
+ asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr)
+ : "$9", "$10", "$11", "$12", "$13", "$14", "memory");
+
+ if (mcheck_taken(0)) {
+ mcheck_taken(0) = 0;
+ value = 0xffffffffU;
+ mb();
+ }
+ mcheck_expected(0) = 0;
+ mb();
+
+#if 1
+ /*
+	 * david.rusling@reo.mts.dec.com.  This code is needed for the
+	 * EB64+, as it does not generate a machine check (why, I don't
+	 * know).  Once we build kernels for one particular platform,
+	 * we can make this conditional on the machine type.
+ */
+ draina();
+
+ /* Now look for any errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+ DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0));
+
+ /* Is any error bit set? */
+ if (stat0 & 0xffe0U) {
+ /* If not NDEV, print status. */
+ if (!(stat0 & 0x0800)) {
+ printk("apecs.c:conf_read: got stat0=%x\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+ wrmces(0x7); /* reset machine check */
+ value = 0xffffffff;
+ }
+#endif
+
+ /* If Type1 access, must reset HAE #2 so normal IO space ops work. */
+ if (type1) {
+ *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
+ mb();
+ }
+ local_irq_restore(flags);
+
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1)
+{
+ unsigned long flags;
+ unsigned int stat0;
+ unsigned int haxr2 = 0;
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+
+ /* If Type1 access, must set HAE #2. */
+ if (type1) {
+ haxr2 = *(vuip)APECS_IOC_HAXR2;
+ mb();
+ *(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
+ }
+
+ draina();
+ mcheck_expected(0) = 1;
+ mb();
+
+ /* Access configuration space. */
+ *(vuip)addr = value;
+ mb();
+ mb(); /* magic */
+ mcheck_expected(0) = 0;
+ mb();
+
+#if 1
+ /*
+	 * david.rusling@reo.mts.dec.com.  This code is needed for the
+	 * EB64+, as it does not generate a machine check (why, I don't
+	 * know).  Once we build kernels for one particular platform,
+	 * we can make this conditional on the machine type.
+ */
+ draina();
+
+ /* Now look for any errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+
+ /* Is any error bit set? */
+ if (stat0 & 0xffe0U) {
+ /* If not NDEV, print status. */
+ if (!(stat0 & 0x0800)) {
+ printk("apecs.c:conf_write: got stat0=%x\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+ wrmces(0x7); /* reset machine check */
+ }
+#endif
+
+ /* If Type1 access, must reset HAE #2 so normal IO space ops work. */
+ if (type1) {
+ *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
+ mb();
+ }
+ local_irq_restore(flags);
+}
+
+static int
+apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ long mask;
+ int shift;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
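+	/*
+	 * (size - 1) * 8 yields 0x00, 0x08 or 0x18 for byte, word or
+	 * longword accesses -- the transfer-length encoding carried in
+	 * the low sparse-space address bits.  The register offset
+	 * itself is shifted up by 5 bits to make room for it.
+	 */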
+ mask = (size - 1) * 8;
+ shift = (where & 3) * 8;
+ addr = (pci_addr << 5) + mask + APECS_CONF;
+ *value = conf_read(addr, type1) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + APECS_CONF;
+ conf_write(addr, value << ((where & 3) * 8), type1);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops apecs_pci_ops =
+{
+ .read = apecs_read_config,
+ .write = apecs_write_config,
+};
+
+void
+apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)APECS_IOC_TBIA = 0;
+ mb();
+}
+
+void __init
+apecs_init_arch(void)
+{
+ struct pci_controller *hose;
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = APECS_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 1 is direct access 1GB at 1GB
+ * Window 2 is scatter-gather 8MB at 8MB (for isa)
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = NULL;
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x40000000;
+
+ *(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
+ *(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
+ *(vuip)APECS_IOC_TB1R = 0;
+
+ *(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
+ *(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;
+
+ apecs_pci_tbi(hose, 0, -1);
+
+ /*
+ * Finally, clear the HAXR2 register, which gets used
+ * for PCI Config Space accesses. That is the way
+ * we want to use it, and we do not want to depend on
+ * what ARC or SRM might have left behind...
+ */
+ *(vuip)APECS_IOC_HAXR2 = 0;
+ mb();
+}
+
+void
+apecs_pci_clr_err(void)
+{
+ unsigned int jd;
+
+ jd = *(vuip)APECS_IOC_DCSR;
+ if (jd & 0xffe0L) {
+ *(vuip)APECS_IOC_SEAR;
+ *(vuip)APECS_IOC_DCSR = jd | 0xffe1L;
+ mb();
+ *(vuip)APECS_IOC_DCSR;
+ }
+ *(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA;
+ mb();
+ *(vuip)APECS_IOC_TBIA;
+}
+
+void
+apecs_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ struct el_common *mchk_header;
+ struct el_apecs_procdata *mchk_procdata;
+ struct el_apecs_sysdata_mcheck *mchk_sysdata;
+
+ mchk_header = (struct el_common *)la_ptr;
+
+ mchk_procdata = (struct el_apecs_procdata *)
+ (la_ptr + mchk_header->proc_offset
+ - sizeof(mchk_procdata->paltemp));
+
+ mchk_sysdata = (struct el_apecs_sysdata_mcheck *)
+ (la_ptr + mchk_header->sys_offset);
+
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ apecs_pci_clr_err();
+ wrmces(0x7); /* reset machine check pending flag */
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "APECS",
+ (mcheck_expected(0)
+ && (mchk_sysdata->epic_dcsr & 0x0c00UL)));
+}
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
new file mode 100644
index 000000000000..fd563064363c
--- /dev/null
+++ b/arch/alpha/kernel/core_cia.c
@@ -0,0 +1,1212 @@
+/*
+ * linux/arch/alpha/kernel/core_cia.c
+ *
+ * Written by David A Rusling (david.rusling@reo.mts.dec.com).
+ * December 1995.
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1997, 1998 Jay Estabrook
+ * Copyright (C) 1998, 1999, 2000 Richard Henderson
+ *
+ * Code common to all CIA core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_cia.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+#define DEBUG_CONFIG 0
+#if DEBUG_CONFIG
+# define DBGC(args) printk args
+#else
+# define DBGC(args)
+#endif
+
+#define vip volatile int *
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address. It is therefore not safe to have
+ * concurrent invocations to configuration space access routines, but
+ * there really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:11 Device select bit.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ u8 bus = bus_dev->number;
+
+ *type1 = (bus != 0);
+ *pci_addr = (bus << 16) | (device_fn << 8) | where;
+
+	DBGC(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x,"
+	      " returning address 0x%lx\n",
+	      bus, device_fn, where, *pci_addr));
+
+ return 0;
+}
+
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1)
+{
+ unsigned long flags;
+ int stat0, value;
+ int cia_cfg = 0;
+
+ DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
+ local_irq_save(flags);
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = stat0;
+ mb();
+ *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
+
+ /* If Type1 access, must set CIA CFG. */
+ if (type1) {
+ cia_cfg = *(vip)CIA_IOC_CFG;
+ *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ mb();
+ draina();
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+
+ /* Access configuration space. */
+ value = *(vip)addr;
+ mb();
+ mb(); /* magic */
+ if (mcheck_taken(0)) {
+ mcheck_taken(0) = 0;
+ value = 0xffffffff;
+ mb();
+ }
+ mcheck_expected(0) = 0;
+ mb();
+
+ /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
+ if (type1) {
+ *(vip)CIA_IOC_CFG = cia_cfg;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ local_irq_restore(flags);
+ DBGC(("done\n"));
+
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1)
+{
+ unsigned long flags;
+ int stat0, cia_cfg = 0;
+
+ DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
+ local_irq_save(flags);
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = stat0;
+ mb();
+ *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
+
+ /* If Type1 access, must set CIA CFG. */
+ if (type1) {
+ cia_cfg = *(vip)CIA_IOC_CFG;
+ *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ mb();
+ draina();
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+
+ /* Access configuration space. */
+ *(vip)addr = value;
+ mb();
+ *(vip)addr; /* read back to force the write */
+
+ mcheck_expected(0) = 0;
+ mb();
+
+ /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
+ if (type1) {
+ *(vip)CIA_IOC_CFG = cia_cfg;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ local_irq_restore(flags);
+ DBGC(("done\n"));
+}
+
+static int
+cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 *value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+ unsigned char type1;
+ int shift;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ shift = (where & 3) * 8;
+ addr = (pci_addr << 5) + mask + CIA_CONF;
+ *value = conf_read(addr, type1) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + CIA_CONF;
+ conf_write(addr, value << ((where & 3) * 8), type1);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops cia_pci_ops =
+{
+ .read = cia_read_config,
+ .write = cia_write_config,
+};
+
+/*
+ * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
+ * It cannot be invalidated. Rather than hard code the pass numbers,
+ * actually try the tbia to see if it works.
+ */
+
+void
+cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */
+ mb();
+ *(vip)CIA_IOC_PCI_TBIA;
+}
+
+/*
+ * On PYXIS, even if the tbia works, we cannot use it. It effectively locks
+ * the chip (as well as direct write to the tag registers) if there is a
+ * SG DMA operation in progress. This is true at least for PYXIS rev. 1,
+ * so always use the method below.
+ */
+/*
+ * This is the method NT and NetBSD use.
+ *
+ * Allocate mappings, and put the chip into DMA loopback mode to read a
+ * garbage page. This works by causing TLB misses, causing old entries to
+ * be purged to make room for the new entries coming in for the garbage page.
+ */
+
+#define CIA_BROKEN_TBIA_BASE 0x30000000
+#define CIA_BROKEN_TBIA_SIZE 1024
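+
+/*
+ * Derived from the constants above: 1024 bytes of PTEs at 8 bytes
+ * each is 128 entries, and with 8K pages that maps the 1MB
+ * scatter-gather window the workaround reads through.
+ */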
+
+/* Always called with interrupts disabled */
+void
+cia_pci_tbi_try2(struct pci_controller *hose,
+ dma_addr_t start, dma_addr_t end)
+{
+ void __iomem *bus_addr;
+ int ctrl;
+
+ /* Put the chip into PCI loopback mode. */
+ mb();
+ ctrl = *(vip)CIA_IOC_CIA_CTRL;
+ *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+
+ /* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
+ each read. This forces SG TLB misses. NetBSD claims that the
+ TLB entries are not quite LRU, meaning that we need to read more
+ times than there are actual tags. The 2117x docs claim strict
+ round-robin. Oh well, we've come this far... */
+ /* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can
+ be filled by the TLB misses *only once* after being invalidated
+ (by tbia or direct write). Next misses won't update them even
+ though the lock bits are cleared. Tags 4-7 are "quite LRU" though,
+ so use them and read at window 3 base exactly 4 times. Reading
+ more sometimes makes the chip crazy. -ink */
+
+ bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4);
+
+ cia_readl(bus_addr + 0x00000);
+ cia_readl(bus_addr + 0x08000);
+ cia_readl(bus_addr + 0x10000);
+ cia_readl(bus_addr + 0x18000);
+
+ cia_iounmap(bus_addr);
+
+ /* Restore normal PCI operation. */
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL = ctrl;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+}
+
+static inline void
+cia_prepare_tbia_workaround(int window)
+{
+ unsigned long *ppte, pte;
+ long i;
+
+ /* Use minimal 1K map. */
+ ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+ pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
+
+ for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
+ ppte[i] = pte;
+
+ *(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
+ *(vip)CIA_IOC_PCI_Wn_MASK(window)
+ = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
+ *(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
+}
+
+static void __init
+verify_tb_operation(void)
+{
+ static int page[PAGE_SIZE/4]
+ __attribute__((aligned(PAGE_SIZE)))
+ __initdata = { 0 };
+
+ struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
+ int ctrl, addr0, tag0, pte0, data0;
+ int temp, use_tbia_try2 = 0;
+ void __iomem *bus_addr;
+
+ /* pyxis -- tbia is broken */
+ if (pci_isa_hose->dense_io_base)
+ use_tbia_try2 = 1;
+
+ /* Put the chip into PCI loopback mode. */
+ mb();
+ ctrl = *(vip)CIA_IOC_CIA_CTRL;
+ *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+
+ /* Write a valid entry directly into the TLB registers. */
+
+ addr0 = arena->dma_base;
+ tag0 = addr0 | 1;
+ pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
+
+ *(vip)CIA_IOC_TB_TAGn(0) = tag0;
+ *(vip)CIA_IOC_TB_TAGn(1) = 0;
+ *(vip)CIA_IOC_TB_TAGn(2) = 0;
+ *(vip)CIA_IOC_TB_TAGn(3) = 0;
+ *(vip)CIA_IOC_TB_TAGn(4) = 0;
+ *(vip)CIA_IOC_TB_TAGn(5) = 0;
+ *(vip)CIA_IOC_TB_TAGn(6) = 0;
+ *(vip)CIA_IOC_TB_TAGn(7) = 0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
+ mb();
+
+ /* Get a usable bus address */
+ bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);
+
+ /* First, verify we can read back what we've written. If
+ this fails, we can't be sure of any of the other testing
+ we're going to do, so bail. */
+ /* ??? Actually, we could do the work with machine checks.
+ By passing this register update test, we pretty much
+ guarantee that cia_pci_tbi_try1 works. If this test
+ fails, cia_pci_tbi_try2 might still work. */
+
+ temp = *(vip)CIA_IOC_TB_TAGn(0);
+ if (temp != tag0) {
+ printk("pci: failed tb register update test "
+ "(tag0 %#x != %#x)\n", temp, tag0);
+ goto failed;
+ }
+ temp = *(vip)CIA_IOC_TB_TAGn(1);
+ if (temp != 0) {
+ printk("pci: failed tb register update test "
+ "(tag1 %#x != 0)\n", temp);
+ goto failed;
+ }
+ temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
+ if (temp != pte0) {
+ printk("pci: failed tb register update test "
+ "(pte0 %#x != %#x)\n", temp, pte0);
+ goto failed;
+ }
+ printk("pci: passed tb register update test\n");
+
+ /* Second, verify we can actually do I/O through this entry. */
+
+ data0 = 0xdeadbeef;
+ page[0] = data0;
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ if (mcheck_taken(0)) {
+ printk("pci: failed sg loopback i/o read test (mcheck)\n");
+ goto failed;
+ }
+ if (temp != data0) {
+ printk("pci: failed sg loopback i/o read test "
+ "(%#x != %#x)\n", temp, data0);
+ goto failed;
+ }
+ printk("pci: passed sg loopback i/o read test\n");
+
+ /* Third, try to invalidate the TLB. */
+
+ if (! use_tbia_try2) {
+ cia_pci_tbi(arena->hose, 0, -1);
+ temp = *(vip)CIA_IOC_TB_TAGn(0);
+ if (temp & 1) {
+ use_tbia_try2 = 1;
+ printk("pci: failed tbia test; workaround available\n");
+ } else {
+ printk("pci: passed tbia test\n");
+ }
+ }
+
+ /* Fourth, verify the TLB snoops the EV5's caches when
+ doing a tlb fill. */
+
+ data0 = 0x5adda15e;
+ page[0] = data0;
+ arena->ptes[4] = pte0;
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr + 4*PAGE_SIZE);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ if (mcheck_taken(0)) {
+ printk("pci: failed pte write cache snoop test (mcheck)\n");
+ goto failed;
+ }
+ if (temp != data0) {
+ printk("pci: failed pte write cache snoop test "
+ "(%#x != %#x)\n", temp, data0);
+ goto failed;
+ }
+ printk("pci: passed pte write cache snoop test\n");
+
+ /* Fifth, verify that a previously invalid PTE entry gets
+ filled from the page table. */
+
+ data0 = 0xabcdef12;
+ page[0] = data0;
+ arena->ptes[5] = pte0;
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr + 5*PAGE_SIZE);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ if (mcheck_taken(0)) {
+ printk("pci: failed valid tag invalid pte reload test "
+ "(mcheck; workaround available)\n");
+ /* Work around this bug by aligning new allocations
+ on 4 page boundaries. */
+ arena->align_entry = 4;
+ } else if (temp != data0) {
+ printk("pci: failed valid tag invalid pte reload test "
+ "(%#x != %#x)\n", temp, data0);
+ goto failed;
+ } else {
+ printk("pci: passed valid tag invalid pte reload test\n");
+ }
+
+ /* Sixth, verify machine checks are working. Test invalid
+ pte under the same valid tag as we used above. */
+
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr + 6*PAGE_SIZE);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ printk("pci: %s pci machine check test\n",
+ mcheck_taken(0) ? "passed" : "failed");
+
+ /* Clean up after the tests. */
+ arena->ptes[4] = 0;
+ arena->ptes[5] = 0;
+
+ if (use_tbia_try2) {
+ alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;
+
+		/* Tags 0-3 must be disabled if we use this workaround. */
+ wmb();
+ *(vip)CIA_IOC_TB_TAGn(0) = 2;
+ *(vip)CIA_IOC_TB_TAGn(1) = 2;
+ *(vip)CIA_IOC_TB_TAGn(2) = 2;
+ *(vip)CIA_IOC_TB_TAGn(3) = 2;
+
+ printk("pci: tbia workaround enabled\n");
+ }
+ alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+exit:
+ /* unmap the bus addr */
+ cia_iounmap(bus_addr);
+
+ /* Restore normal PCI operation. */
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL = ctrl;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+ return;
+
+failed:
+ printk("pci: disabling sg translation window\n");
+ *(vip)CIA_IOC_PCI_W0_BASE = 0;
+ *(vip)CIA_IOC_PCI_W1_BASE = 0;
+ pci_isa_hose->sg_isa = NULL;
+ alpha_mv.mv_pci_tbi = NULL;
+ goto exit;
+}
+
+#if defined(ALPHA_RESTORE_SRM_SETUP)
+/* Save CIA configuration data as the console had it set up. */
+struct
+{
+ unsigned int hae_mem;
+ unsigned int hae_io;
+ unsigned int pci_dac_offset;
+ unsigned int err_mask;
+ unsigned int cia_ctrl;
+ unsigned int cia_cnfg;
+ struct {
+ unsigned int w_base;
+ unsigned int w_mask;
+ unsigned int t_base;
+ } window[4];
+} saved_config __attribute((common));
+
+void
+cia_save_srm_settings(int is_pyxis)
+{
+ int i;
+
+ /* Save some important registers. */
+ saved_config.err_mask = *(vip)CIA_IOC_ERR_MASK;
+ saved_config.cia_ctrl = *(vip)CIA_IOC_CIA_CTRL;
+ saved_config.hae_mem = *(vip)CIA_IOC_HAE_MEM;
+ saved_config.hae_io = *(vip)CIA_IOC_HAE_IO;
+ saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;
+
+ if (is_pyxis)
+ saved_config.cia_cnfg = *(vip)CIA_IOC_CIA_CNFG;
+ else
+ saved_config.cia_cnfg = 0;
+
+ /* Save DMA windows configuration. */
+ for (i = 0; i < 4; i++) {
+ saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
+ saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
+ saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
+ }
+ mb();
+}
+
+void
+cia_restore_srm_settings(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
+ *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
+ *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
+ }
+
+ *(vip)CIA_IOC_HAE_MEM = saved_config.hae_mem;
+ *(vip)CIA_IOC_HAE_IO = saved_config.hae_io;
+ *(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
+ *(vip)CIA_IOC_ERR_MASK = saved_config.err_mask;
+ *(vip)CIA_IOC_CIA_CTRL = saved_config.cia_ctrl;
+
+ if (saved_config.cia_cnfg) /* Must be pyxis. */
+ *(vip)CIA_IOC_CIA_CNFG = saved_config.cia_cnfg;
+
+ mb();
+}
+#else /* ALPHA_RESTORE_SRM_SETUP */
+#define cia_save_srm_settings(p) do {} while (0)
+#define cia_restore_srm_settings() do {} while (0)
+#endif /* ALPHA_RESTORE_SRM_SETUP */
+
+
+static void __init
+do_init_arch(int is_pyxis)
+{
+ struct pci_controller *hose;
+ int temp, cia_rev, tbia_window;
+
+ cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
+ printk("pci: cia revision %d%s\n",
+ cia_rev, is_pyxis ? " (pyxis)" : "");
+
+ if (alpha_using_srm)
+ cia_save_srm_settings(is_pyxis);
+
+ /* Set up error reporting. */
+ temp = *(vip)CIA_IOC_ERR_MASK;
+ temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
+ | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
+ *(vip)CIA_IOC_ERR_MASK = temp;
+
+ /* Clear all currently pending errors. */
+ temp = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = temp;
+
+ /* Turn on mchecks. */
+ temp = *(vip)CIA_IOC_CIA_CTRL;
+ temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
+ *(vip)CIA_IOC_CIA_CTRL = temp;
+
+ /* Clear the CFG register, which gets used for PCI config space
+ accesses. That is the way we want to use it, and we do not
+ want to depend on what ARC or SRM might have left behind. */
+ *(vip)CIA_IOC_CFG = 0;
+
+ /* Zero the HAEs. */
+ *(vip)CIA_IOC_HAE_MEM = 0;
+ *(vip)CIA_IOC_HAE_IO = 0;
+
+ /* For PYXIS, we always use BWX bus and i/o accesses. To that end,
+ make sure they're enabled on the controller. At the same time,
+ enable the monster window. */
+ if (is_pyxis) {
+ temp = *(vip)CIA_IOC_CIA_CNFG;
+ temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
+ *(vip)CIA_IOC_CIA_CNFG = temp;
+ }
+
+ /* Synchronize with all previous changes. */
+ mb();
+ *(vip)CIA_IOC_CIA_REV;
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ if (! is_pyxis) {
+ struct resource *hae_mem = alloc_resource();
+ hose->mem_space = hae_mem;
+
+ hae_mem->start = 0;
+ hae_mem->end = CIA_MEM_R1_MASK;
+ hae_mem->name = pci_hae0_name;
+ hae_mem->flags = IORESOURCE_MEM;
+
+ if (request_resource(&iomem_resource, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM\n");
+
+ hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = CIA_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+ } else {
+ hose->sparse_mem_base = 0;
+ hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
+ hose->sparse_io_base = 0;
+ hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
+ }
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 0 is S/G 8MB at 8MB (for isa)
+ * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
+ * Window 2 is direct access 2GB at 2GB
+ * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
+ *
+ * ??? NetBSD hints that page tables must be aligned to 32K,
+ * possibly due to a hardware bug. This is over-aligned
+ * from the 8K alignment one would expect for an 8MB window.
+	 * No description of which revisions are affected.
+ */
+
+ hose->sg_pci = NULL;
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);
+
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
+ *(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
+ *(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
+
+ *(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
+ *(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
+ *(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;
+
+ /* On PYXIS we have the monster window, selected by bit 40, so
+ there is no need for window3 to be enabled.
+
+ On CIA, we don't have true arbitrary addressing -- bits <39:32>
+ are compared against W_DAC. We can, however, directly map 4GB,
+ which is better than before. However, due to assumptions made
+ elsewhere, we should not claim that we support DAC unless that
+ 4GB covers all of physical memory.
+
+ On CIA rev 1, apparently W1 and W2 can't be used for SG.
+ At least, there are reports that it doesn't work for Alcor.
+ In that case, we have no choice but to use W3 for the TBIA
+ workaround, which means we can't use DAC at all. */
+
+ tbia_window = 1;
+ if (is_pyxis) {
+ *(vip)CIA_IOC_PCI_W3_BASE = 0;
+ } else if (cia_rev == 1) {
+ *(vip)CIA_IOC_PCI_W1_BASE = 0;
+ tbia_window = 3;
+ } else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
+ *(vip)CIA_IOC_PCI_W3_BASE = 0;
+ } else {
+ *(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
+ *(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
+ *(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;
+
+ alpha_mv.pci_dac_offset = 0x200000000UL;
+ *(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
+ }
+
+ /* Prepare workaround for apparently broken tbia. */
+ cia_prepare_tbia_workaround(tbia_window);
+}
+
+void __init
+cia_init_arch(void)
+{
+ do_init_arch(0);
+}
+
+void __init
+pyxis_init_arch(void)
+{
+	/* On pyxis machines we can precisely calculate the
+	   CPU clock frequency using the pyxis real time counter.
+	   It's especially useful for SX164 with a broken RTC.
+
+	   Both CPU and chipset are driven by a single 16.666MHz
+	   or 16.667MHz crystal oscillator.  The PYXIS_RT_COUNT
+	   clock is 66.66 MHz. -ink */
+
+ unsigned int cc0, cc1;
+ unsigned long pyxis_cc;
+
+ __asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
+ pyxis_cc = *(vulp)PYXIS_RT_COUNT;
+ do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
+ __asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
+ cc1 -= cc0;
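+
+	/*
+	 * Arithmetic: 4096 RT ticks at 66.666 MHz take
+	 * 4096 / 66666667 s, about 61.44 us, so the CPU frequency is
+	 * cc1 * 66666667 / 4096.  The expression below computes
+	 * (cc1 / 2048) * 100000000 / 3 = cc1 * 100000000 / 6144,
+	 * the same ratio to within rounding.
+	 */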
+ hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
+ hwrpb_update_checksum(hwrpb);
+
+ do_init_arch(1);
+}
+
+void
+cia_kill_arch(int mode)
+{
+ if (alpha_using_srm)
+ cia_restore_srm_settings();
+}
+
+void __init
+cia_init_pci(void)
+{
+ /* Must delay this from init_arch, as we need machine checks. */
+ verify_tb_operation();
+ common_init_pci();
+}
+
+static inline void
+cia_pci_clr_err(void)
+{
+ int jd;
+
+ jd = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = jd;
+ mb();
+ *(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */
+}
+
+#ifdef CONFIG_VERBOSE_MCHECK
+static void
+cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
+{
+ static const char * const pci_cmd_desc[16] = {
+ "Interrupt Acknowledge", "Special Cycle", "I/O Read",
+ "I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
+ "Memory Write", "Reserved 0x8", "Reserved 0x9",
+ "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address Cycle",
+ "Memory Read Line", "Memory Write and Invalidate"
+ };
+
+ if (cia->cia_err & (CIA_ERR_COR_ERR
+ | CIA_ERR_UN_COR_ERR
+ | CIA_ERR_MEM_NEM
+ | CIA_ERR_PA_PTE_INV)) {
+ static const char * const window_desc[6] = {
+ "No window active", "Window 0 hit", "Window 1 hit",
+ "Window 2 hit", "Window 3 hit", "Monster window hit"
+ };
+
+ const char *window;
+ const char *cmd;
+ unsigned long addr, tmp;
+ int lock, dac;
+
+ cmd = pci_cmd_desc[cia->pci_err0 & 0x7];
+ lock = (cia->pci_err0 >> 4) & 1;
+ dac = (cia->pci_err0 >> 5) & 1;
+
+ tmp = (cia->pci_err0 >> 8) & 0x1F;
+ tmp = ffs(tmp);
+ window = window_desc[tmp];
+
+ addr = cia->pci_err1;
+ if (dac) {
+ tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL;
+ addr |= tmp << 32;
+ }
+
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+ printk(KERN_CRIT " DMA command: %s\n", cmd);
+ printk(KERN_CRIT " PCI address: %#010lx\n", addr);
+ printk(KERN_CRIT " %s, Lock: %d, DAC: %d\n",
+ window, lock, dac);
+ } else if (cia->cia_err & (CIA_ERR_PERR
+ | CIA_ERR_PCI_ADDR_PE
+ | CIA_ERR_RCVD_MAS_ABT
+ | CIA_ERR_RCVD_TAR_ABT
+ | CIA_ERR_IOA_TIMEOUT)) {
+ static const char * const master_st_desc[16] = {
+ "Idle", "Drive bus", "Address step cycle",
+ "Address cycle", "Data cycle", "Last read data cycle",
+ "Last write data cycle", "Read stop cycle",
+ "Write stop cycle", "Read turnaround cycle",
+ "Write turnaround cycle", "Reserved 0xB",
+ "Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
+ "Unknown state"
+ };
+ static const char * const target_st_desc[16] = {
+ "Idle", "Busy", "Read data cycle", "Write data cycle",
+ "Read stop cycle", "Write stop cycle",
+ "Read turnaround cycle", "Write turnaround cycle",
+ "Read wait cycle", "Write wait cycle",
+ "Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
+ "Reserved 0xD", "Reserved 0xE", "Unknown state"
+ };
+
+ const char *cmd;
+ const char *master, *target;
+ unsigned long addr, tmp;
+ int dac;
+
+ master = master_st_desc[(cia->pci_err0 >> 16) & 0xF];
+ target = target_st_desc[(cia->pci_err0 >> 20) & 0xF];
+ cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF];
+ dac = (cia->pci_err0 >> 28) & 1;
+
+ addr = cia->pci_err2;
+ if (dac) {
+ tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL;
+ addr |= tmp << 32;
+ }
+
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+ printk(KERN_CRIT " PCI command: %s\n", cmd);
+ printk(KERN_CRIT " Master state: %s, Target state: %s\n",
+ master, target);
+ printk(KERN_CRIT " PCI address: %#010lx, DAC: %d\n",
+ addr, dac);
+ } else {
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+ printk(KERN_CRIT " Unknown PCI error\n");
+		printk(KERN_CRIT "  PCI_ERR0 = %#08lx\n", cia->pci_err0);
+		printk(KERN_CRIT "  PCI_ERR1 = %#08lx\n", cia->pci_err1);
+		printk(KERN_CRIT "  PCI_ERR2 = %#08lx\n", cia->pci_err2);
+ }
+}
+
+static void
+cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
+{
+ unsigned long mem_port_addr;
+ unsigned long mem_port_mask;
+ const char *mem_port_cmd;
+ const char *seq_state;
+ const char *set_select;
+ unsigned long tmp;
+
+ /* If this is a DMA command, also decode the PCI bits. */
+ if ((cia->mem_err1 >> 20) & 1)
+ cia_decode_pci_error(cia, msg);
+ else
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+
+ mem_port_addr = cia->mem_err0 & 0xfffffff0;
+ mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;
+
+ mem_port_mask = (cia->mem_err1 >> 12) & 0xF;
+
+ tmp = (cia->mem_err1 >> 8) & 0xF;
+ tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
+ if ((tmp & 0x1E) == 0x06)
+ mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
+ else if ((tmp & 0x1C) == 0x08)
+ mem_port_cmd = "READ MISS or READ MISS MODIFY";
+ else if (tmp == 0x1C)
+ mem_port_cmd = "BC VICTIM";
+ else if ((tmp & 0x1E) == 0x0E)
+ mem_port_cmd = "READ MISS MODIFY";
+ else if ((tmp & 0x1C) == 0x18)
+ mem_port_cmd = "DMA READ or DMA READ MODIFY";
+ else if ((tmp & 0x1E) == 0x12)
+ mem_port_cmd = "DMA WRITE";
+ else
+ mem_port_cmd = "Unknown";
+
+ tmp = (cia->mem_err1 >> 16) & 0xF;
+ switch (tmp) {
+ case 0x0:
+ seq_state = "Idle";
+ break;
+ case 0x1:
+ seq_state = "DMA READ or DMA WRITE";
+ break;
+ case 0x2: case 0x3:
+ seq_state = "READ MISS (or READ MISS MODIFY) with victim";
+ break;
+ case 0x4: case 0x5: case 0x6:
+ seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
+ break;
+ case 0x8: case 0x9: case 0xB:
+ seq_state = "Refresh";
+ break;
+ case 0xC:
+ seq_state = "Idle, waiting for DMA pending read";
+ break;
+ case 0xE: case 0xF:
+ seq_state = "Idle, ras precharge";
+ break;
+ default:
+ seq_state = "Unknown";
+ break;
+ }
+
+ tmp = (cia->mem_err1 >> 24) & 0x1F;
+ switch (tmp) {
+ case 0x00: set_select = "Set 0 selected"; break;
+ case 0x01: set_select = "Set 1 selected"; break;
+ case 0x02: set_select = "Set 2 selected"; break;
+ case 0x03: set_select = "Set 3 selected"; break;
+ case 0x04: set_select = "Set 4 selected"; break;
+ case 0x05: set_select = "Set 5 selected"; break;
+ case 0x06: set_select = "Set 6 selected"; break;
+ case 0x07: set_select = "Set 7 selected"; break;
+ case 0x08: set_select = "Set 8 selected"; break;
+ case 0x09: set_select = "Set 9 selected"; break;
+ case 0x0A: set_select = "Set A selected"; break;
+ case 0x0B: set_select = "Set B selected"; break;
+ case 0x0C: set_select = "Set C selected"; break;
+ case 0x0D: set_select = "Set D selected"; break;
+ case 0x0E: set_select = "Set E selected"; break;
+ case 0x0F: set_select = "Set F selected"; break;
+ case 0x10: set_select = "No set selected"; break;
+ case 0x1F: set_select = "Refresh cycle"; break;
+ default: set_select = "Unknown"; break;
+ }
+
+ printk(KERN_CRIT " Memory port command: %s\n", mem_port_cmd);
+ printk(KERN_CRIT " Memory port address: %#010lx, mask: %#lx\n",
+ mem_port_addr, mem_port_mask);
+ printk(KERN_CRIT " Memory sequencer state: %s\n", seq_state);
+ printk(KERN_CRIT " Memory set: %s\n", set_select);
+}
+
+static void
+cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
+{
+ long syn;
+ long i;
+ const char *fmt;
+
+ cia_decode_mem_error(cia, msg);
+
+ syn = cia->cia_syn & 0xff;
+ if (syn == (syn & -syn)) {
+ fmt = KERN_CRIT " ECC syndrome %#x -- check bit %d\n";
+ i = ffs(syn) - 1;
+ } else {
+ static unsigned char const data_bit[64] = {
+ 0xCE, 0xCB, 0xD3, 0xD5,
+ 0xD6, 0xD9, 0xDA, 0xDC,
+ 0x23, 0x25, 0x26, 0x29,
+ 0x2A, 0x2C, 0x31, 0x34,
+ 0x0E, 0x0B, 0x13, 0x15,
+ 0x16, 0x19, 0x1A, 0x1C,
+ 0xE3, 0xE5, 0xE6, 0xE9,
+ 0xEA, 0xEC, 0xF1, 0xF4,
+ 0x4F, 0x4A, 0x52, 0x54,
+ 0x57, 0x58, 0x5B, 0x5D,
+ 0xA2, 0xA4, 0xA7, 0xA8,
+ 0xAB, 0xAD, 0xB0, 0xB5,
+ 0x8F, 0x8A, 0x92, 0x94,
+ 0x97, 0x98, 0x9B, 0x9D,
+ 0x62, 0x64, 0x67, 0x68,
+ 0x6B, 0x6D, 0x70, 0x75
+ };
+
+ for (i = 0; i < 64; ++i)
+ if (data_bit[i] == syn)
+ break;
+
+ if (i < 64)
+ fmt = KERN_CRIT " ECC syndrome %#x -- data bit %d\n";
+ else
+ fmt = KERN_CRIT " ECC syndrome %#x -- unknown bit\n";
+ }
+
+ printk (fmt, syn, i);
+}
+
+static void
+cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
+{
+ static const char * const cmd_desc[16] = {
+ "NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
+ "SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
+ "READ MISS0", "READ MISS1", "READ MISS MOD0",
+ "READ MISS MOD1", "BCACHE VICTIM", "Spare",
+ "READ MISS MOD STC0", "READ MISS MOD STC1"
+ };
+
+ unsigned long addr;
+ unsigned long mask;
+ const char *cmd;
+ int par;
+
+ addr = cia->cpu_err0 & 0xfffffff0;
+ addr |= (cia->cpu_err1 & 0x83UL) << 32;
+ cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
+ mask = (cia->cpu_err1 >> 12) & 0xF;
+ par = (cia->cpu_err1 >> 21) & 1;
+
+ printk(KERN_CRIT "CIA machine check: System bus parity error\n");
+ printk(KERN_CRIT " Command: %s, Parity bit: %d\n", cmd, par);
+ printk(KERN_CRIT " Address: %#010lx, Mask: %#lx\n", addr, mask);
+}
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+
+static int
+cia_decode_mchk(unsigned long la_ptr)
+{
+ struct el_common *com;
+ struct el_CIA_sysdata_mcheck *cia;
+
+ com = (void *)la_ptr;
+ cia = (void *)(la_ptr + com->sys_offset);
+
+ if ((cia->cia_err & CIA_ERR_VALID) == 0)
+ return 0;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!alpha_verbose_mcheck)
+ return 1;
+
+ switch (ffs(cia->cia_err & 0xfff) - 1) {
+ case 0: /* CIA_ERR_COR_ERR */
+ cia_decode_ecc_error(cia, "Corrected ECC error");
+ break;
+ case 1: /* CIA_ERR_UN_COR_ERR */
+ cia_decode_ecc_error(cia, "Uncorrected ECC error");
+ break;
+ case 2: /* CIA_ERR_CPU_PE */
+ cia_decode_parity_error(cia);
+ break;
+ case 3: /* CIA_ERR_MEM_NEM */
+ cia_decode_mem_error(cia, "Access to nonexistent memory");
+ break;
+ case 4: /* CIA_ERR_PCI_SERR */
+ cia_decode_pci_error(cia, "PCI bus system error");
+ break;
+ case 5: /* CIA_ERR_PERR */
+ cia_decode_pci_error(cia, "PCI data parity error");
+ break;
+ case 6: /* CIA_ERR_PCI_ADDR_PE */
+ cia_decode_pci_error(cia, "PCI address parity error");
+ break;
+ case 7: /* CIA_ERR_RCVD_MAS_ABT */
+ cia_decode_pci_error(cia, "PCI master abort");
+ break;
+ case 8: /* CIA_ERR_RCVD_TAR_ABT */
+ cia_decode_pci_error(cia, "PCI target abort");
+ break;
+ case 9: /* CIA_ERR_PA_PTE_INV */
+ cia_decode_pci_error(cia, "PCI invalid PTE");
+ break;
+ case 10: /* CIA_ERR_FROM_WRT_ERR */
+ cia_decode_mem_error(cia, "Write to flash ROM attempted");
+ break;
+ case 11: /* CIA_ERR_IOA_TIMEOUT */
+ cia_decode_pci_error(cia, "I/O timeout");
+ break;
+ }
+
+ if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Correctable ECC error\n");
+ if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Uncorrectable ECC error\n");
+ if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "System bus parity error\n");
+ if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Access to nonexistent memory\n");
+ if (cia->cia_err & CIA_ERR_LOST_PERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI data parity error\n");
+ if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI address parity error\n");
+ if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI master abort\n");
+ if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI target abort\n");
+ if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI invalid PTE\n");
+ if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Write to flash ROM attempted\n");
+ if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "I/O timeout\n");
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return 1;
+}
+
+void
+cia_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ int expected;
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ cia_pci_clr_err();
+ wrmces(rdmces()); /* reset machine check pending flag. */
+ mb();
+
+ expected = mcheck_expected(0);
+ if (!expected && vector == 0x660)
+ expected = cia_decode_mchk(la_ptr);
+ process_mcheck_info(vector, la_ptr, regs, "CIA", expected);
+}
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
new file mode 100644
index 000000000000..138d497d1cca
--- /dev/null
+++ b/arch/alpha/kernel/core_irongate.c
@@ -0,0 +1,416 @@
+/*
+ * linux/arch/alpha/kernel/core_irongate.c
+ *
+ * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * Copyright (C) 1999 Alpha Processor, Inc.,
+ * (David Daniel, Stig Telfer, Soohoon Lee)
+ *
+ * Code common to all IRONGATE core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_irongate.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/pci.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+igcsr32 *IronECC;
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address accordingly. It is therefore not safe
+ * to have concurrent invocations to configuration space access
+ * routines, but there really shouldn't be any need for this.
+ *
+ * addr[31:24] reserved
+ * addr[23:16]	bus number (8 bits = 256 possible buses)
+ * addr[15:11] Device number (5 bits)
+ * addr[10: 8] function number
+ * addr[ 7: 2] register number
+ *
+ * For IRONGATE:
+ * if (bus = addr[23:16]) == 0
+ * then
+ * type 0 config cycle:
+ * addr_on_pci[31:11] = id selection for device = addr[15:11]
+ * addr_on_pci[10: 2] = addr[10: 2] ???
+ * addr_on_pci[ 1: 0] = 00
+ * else
+ * type 1 config cycle (pass on with no decoding):
+ * addr_on_pci[31:24] = 0
+ * addr_on_pci[23: 2] = addr[23: 2]
+ * addr_on_pci[ 1: 0] = 01
+ * fi
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= IRONGATE_CONF;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
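+	/*
+	 * Irongate config space is a flat, BWX-style region (note the
+	 * IRONGATE_CONF offset folded in by mk_conf_addr), so sized
+	 * byte/word/longword accesses are used directly -- there is
+	 * no sparse-space shifting as on APECS/LCA/CIA.
+	 */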
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops irongate_pci_ops =
+{
+ .read = irongate_read_config,
+ .write = irongate_write_config,
+};
+
+int
+irongate_pci_clr_err(void)
+{
+ unsigned int nmi_ctl=0;
+ unsigned int IRONGATE_jd;
+
+again:
+ IRONGATE_jd = IRONGATE0->stat_cmd;
+ printk("Iron stat_cmd %x\n", IRONGATE_jd);
+ IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
+ mb();
+ IRONGATE_jd = IRONGATE0->stat_cmd; /* re-read to force write */
+
+ IRONGATE_jd = *IronECC;
+ printk("Iron ECC %x\n", IRONGATE_jd);
+ *IronECC = IRONGATE_jd; /* write again clears error bits */
+ mb();
+ IRONGATE_jd = *IronECC; /* re-read to force write */
+
+ /* Clear ALI NMI */
+ nmi_ctl = inb(0x61);
+ nmi_ctl |= 0x0c;
+ outb(nmi_ctl, 0x61);
+ nmi_ctl &= ~0x0c;
+ outb(nmi_ctl, 0x61);
+
+ IRONGATE_jd = *IronECC;
+ if (IRONGATE_jd & 0x300) goto again;
+
+ return 0;
+}
+
+#define IRONGATE_3GB 0xc0000000UL
+
+/* On Albacore (aka UP1500) with 4GB of RAM we have to reserve some
+   memory for PCI.  At this point we just reserve memory above 3GB.  Most
+   of this memory will be freed after PCI setup is done. */
+static void __init
+albacore_init_arch(void)
+{
+ unsigned long memtop = max_low_pfn << PAGE_SHIFT;
+ unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;
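+	/* pci_mem rounds memtop up to the next 16MB boundary; e.g. an
+	   illustrative memtop of 0x7e000000 gives pci_mem 0x7f000000. */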
+ struct percpu_struct *cpu;
+ int pal_rev, pal_var;
+
+ cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
+ pal_rev = cpu->pal_revision & 0xffff;
+ pal_var = (cpu->pal_revision >> 16) & 0xff;
+
+ /* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up
+ the CPU incorrectly (leave speculative stores enabled),
+ which causes memory corruption under certain conditions.
+ Issue a warning for such consoles. */
+ if (alpha_using_srm &&
+ (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
+ printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
+ "or later\n");
+
+ if (pci_mem > IRONGATE_3GB)
+ pci_mem = IRONGATE_3GB;
+ IRONGATE0->pci_mem = pci_mem;
+ alpha_mv.min_mem_address = pci_mem;
+ if (memtop > pci_mem) {
+#ifdef CONFIG_BLK_DEV_INITRD
+ extern unsigned long initrd_start, initrd_end;
+ extern void *move_initrd(unsigned long);
+
+ /* Move the initrd out of the way. */
+ if (initrd_end && __pa(initrd_end) > pci_mem) {
+ unsigned long size;
+
+ size = initrd_end - initrd_start;
+ free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
+ PAGE_ALIGN(size));
+ if (!move_initrd(pci_mem))
+ printk("irongate_init_arch: initrd too big "
+ "(%ldK)\ndisabling initrd\n",
+ size / 1024);
+ }
+#endif
+ reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop - pci_mem);
+ printk("irongate_init_arch: temporarily reserving "
+ "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
+ }
+}
+
+static void __init
+irongate_setup_agp(void)
+{
+ /* Disable the GART window. AGPGART doesn't work due to yet
+ unresolved memory coherency issues... */
+ IRONGATE0->agpva = IRONGATE0->agpva & ~0xf;
+ alpha_agpgart_size = 0;
+}
+
+void __init
+irongate_init_arch(void)
+{
+ struct pci_controller *hose;
+ int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006; /* Albacore? */
+
+ IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;
+
+ irongate_pci_clr_err();
+
+ if (amd761)
+ albacore_init_arch();
+
+ irongate_setup_agp();
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ /* This is for userland consumption. For some reason, the 40-bit
+ PIO bias that we use in the kernel through KSEG didn't work for
+ the page table based user mappings. So make sure we get the
+ 43-bit PIO bias. */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base
+ = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
+ hose->dense_io_base
+ = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+ __direct_map_base = 0;
+ __direct_map_size = 0xffffffff;
+}
+
+/*
+ * IO map and AGP support
+ */
+#include <linux/vmalloc.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <asm/pgalloc.h>
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))
+
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
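+
+/*
+ * From the macros above: bits <31:22> of the bus address index the
+ * GATT page directory and bits <21:12> index one of the 1024 32-bit
+ * entries within the selected GATT page, so each GATT page spans
+ * 4MB of aperture in 4K steps.
+ */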
+
+void __iomem *
+irongate_ioremap(unsigned long addr, unsigned long size)
+{
+ struct vm_struct *area;
+ unsigned long vaddr;
+ unsigned long baddr, last;
+ u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
+ unsigned long gart_bus_addr;
+
+ if (!alpha_agpgart_size)
+ return (void __iomem *)(addr + IRONGATE_MEM);
+
+ gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
+ PCI_BASE_ADDRESS_MEM_MASK;
+
+ /*
+ * Check for within the AGP aperture...
+ */
+ do {
+ /*
+ * Check the AGP area
+ */
+ if (addr >= gart_bus_addr && addr + size - 1 <
+ gart_bus_addr + alpha_agpgart_size)
+ break;
+
+ /*
+ * Not found - assume legacy ioremap
+ */
+ return (void __iomem *)(addr + IRONGATE_MEM);
+ } while(0);
+
+ mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
+ PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);
+
+ gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */
+
+ /*
+ * Adjust the limits (mappings must be page aligned)
+ */
+ if (addr & ~PAGE_MASK) {
+ printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
+ addr);
+ return (void __iomem *)(addr + IRONGATE_MEM);
+ }
+ last = addr + size - 1;
+ size = PAGE_ALIGN(last) - addr;
+
+#if 0
+ printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
+ printk("irongate_ioremap: gart_bus_addr 0x%lx\n", gart_bus_addr);
+	printk("irongate_ioremap: alpha_agpgart_size 0x%lx\n",
+	       (unsigned long)alpha_agpgart_size);
+ printk("irongate_ioremap: mmio_regs %p\n", mmio_regs);
+ printk("irongate_ioremap: gatt_pages %p\n", gatt_pages);
+
+ for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
+ {
+ cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
+ pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
+ printk("irongate_ioremap: cur_gatt %p pte 0x%x\n",
+ cur_gatt, pte);
+ }
+#endif
+
+ /*
+ * Map it
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area) return NULL;
+
+ for(baddr = addr, vaddr = (unsigned long)area->addr;
+ baddr <= last;
+ baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
+ {
+ cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
+ pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
+
+ if (__alpha_remap_area_pages(vaddr,
+ pte, PAGE_SIZE, 0)) {
+ printk("AGP ioremap: FAILED to map...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ }
+
+ flush_tlb_all();
+
+ vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
+#if 0
+ printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
+ addr, size, vaddr);
+#endif
+ return (void __iomem *)vaddr;
+}
+
+void
+irongate_iounmap(volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (((long)addr >> 41) == -2)
+ return; /* kseg map, nothing to do */
+ if (addr)
+ return vfree((void *)(PAGE_MASK & addr));
+}
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c
new file mode 100644
index 000000000000..6a5a9145c676
--- /dev/null
+++ b/arch/alpha/kernel/core_lca.c
@@ -0,0 +1,515 @@
+/*
+ * linux/arch/alpha/kernel/core_lca.c
+ *
+ * Written by David Mosberger (davidm@cs.arizona.edu) with some code
+ * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
+ * bios code.
+ *
+ * Code common to all LCA core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_lca.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+/*
+ * Machine check reasons. Defined according to PALcode sources
+ * (osf.h and platform.h).
+ */
+#define MCHK_K_TPERR 0x0080
+#define MCHK_K_TCPERR 0x0082
+#define MCHK_K_HERR 0x0084
+#define MCHK_K_ECC_C 0x0086
+#define MCHK_K_ECC_NC 0x0088
+#define MCHK_K_UNKNOWN 0x008A
+#define MCHK_K_CACKSOFT 0x008C
+#define MCHK_K_BUGCHECK 0x008E
+#define MCHK_K_OS_BUGCHECK 0x0090
+#define MCHK_K_DCPERR 0x0092
+#define MCHK_K_ICPERR 0x0094
+
+
+/*
+ * Platform-specific machine-check reasons:
+ */
+#define MCHK_K_SIO_SERR 0x204 /* all platforms so far */
+#define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */
+#define MCHK_K_DCSR 0x208 /* all but Noname */
+
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address and setup the LCA_IOC_CONF register
+ * accordingly. It is therefore not safe to have concurrent
+ * invocations to configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *	31:11	Device select (one bit per device IDSEL)
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ *	23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ *	The function number selects which function of a multi-function
+ *	device (e.g., a combined SCSI/Ethernet card) is addressed.
+ *
+ *	The register number selects a DWORD (32 bit) register offset.
+ *	It is not shifted left by 2 bits because the bottom two bits
+ *	are simply "dropped" (always zero).
+ */
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ if (bus == 0) {
+ int device = device_fn >> 3;
+ int func = device_fn & 0x7;
+
+ /* Type 0 configuration cycle. */
+
+ if (device > 12) {
+ return -1;
+ }
+
+ *(vulp)LCA_IOC_CONF = 0;
+ addr = (1 << (11 + device)) | (func << 8) | where;
+ } else {
+ /* Type 1 configuration cycle. */
+ *(vulp)LCA_IOC_CONF = 1;
+ addr = (bus << 16) | (device_fn << 8) | where;
+ }
+ *pci_addr = addr;
+ return 0;
+}
+
+static unsigned int
+conf_read(unsigned long addr)
+{
+ unsigned long flags, code, stat0;
+ unsigned int value;
+
+ local_irq_save(flags);
+
+	/* Reset status register to avoid losing errors. */
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Access configuration space. */
+ value = *(vuip)addr;
+ draina();
+
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ if (stat0 & LCA_IOC_STAT0_ERR) {
+ code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
+ & LCA_IOC_STAT0_CODE_MASK);
+ if (code != 1) {
+ printk("lca.c:conf_read: got stat0=%lx\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Reset machine check. */
+ wrmces(0x7);
+
+ value = 0xffffffff;
+ }
+ local_irq_restore(flags);
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value)
+{
+ unsigned long flags, code, stat0;
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+	/* Reset status register to avoid losing errors. */
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Access configuration space. */
+ *(vuip)addr = value;
+ draina();
+
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ if (stat0 & LCA_IOC_STAT0_ERR) {
+ code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
+ & LCA_IOC_STAT0_CODE_MASK);
+ if (code != 1) {
+ printk("lca.c:conf_write: got stat0=%lx\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Reset machine check. */
+ wrmces(0x7);
+ }
+ local_irq_restore(flags);
+}
+
+static int
+lca_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+ int shift;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = (where & 3) * 8;
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + LCA_CONF;
+ *value = conf_read(addr) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + LCA_CONF;
+ conf_write(addr, value << ((where & 3) * 8));
+ return PCIBIOS_SUCCESSFUL;
+}
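+
+/*
+ * Illustrative sketch (editor's note, not part of the original code):
+ * how the sparse-space encoding above maps a config request onto an
+ * address.  Reading the 16-bit Vendor ID (where = 0x00, size = 2) of
+ * bus 0, device 3, function 0:
+ *
+ *	pci_addr = 1 << (11 + 3)	= 0x4000	(type 0 IDSEL)
+ *	mask	 = (2 - 1) * 8		= 0x08		(length encoding)
+ *	addr	 = (pci_addr << 5) + mask + LCA_CONF
+ *		 = 0x80008 + LCA_CONF
+ *
+ * The transfer length rides in the low sparse-space address bits, and
+ * the value read back is shifted right by (where & 3) * 8 to line up
+ * the byte lanes.
+ */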
+
+struct pci_ops lca_pci_ops =
+{
+ .read = lca_read_config,
+ .write = lca_write_config,
+};
+
+void
+lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vulp)LCA_IOC_TBIA = 0;
+ mb();
+}
+
+void __init
+lca_init_arch(void)
+{
+ struct pci_controller *hose;
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = LCA_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Mimic the SRM settings for the direct-map window.
+ * Window 0 is scatter-gather 8MB at 8MB (for isa).
+ * Window 1 is direct access 1GB at 1GB.
+ *
+ * Note that we do not try to save any of the DMA window CSRs
+ * before setting them, since we cannot read those CSRs on LCA.
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = NULL;
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x40000000;
+
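+	/* Judging by the values written here, W_BASE bit 33 enables a
+	   window and bit 32 selects scatter-gather translation, hence
+	   3UL<<32 for the SG window and 2UL<<32 for the direct one. */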
+ *(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32);
+ *(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes);
+
+ *(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32);
+ *(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000;
+ *(vulp)LCA_IOC_T_BASE1 = 0;
+
+ *(vulp)LCA_IOC_TB_ENA = 0x80;
+
+ lca_pci_tbi(hose, 0, -1);
+
+ /*
+ * Disable PCI parity for now. The NCR53c810 chip has
+ * troubles meeting the PCI spec which results in
+ * data parity errors.
+ */
+ *(vulp)LCA_IOC_PAR_DIS = 1UL<<5;
+
+ /*
+ * Finally, set up for restoring the correct HAE if using SRM.
+ * Again, since we cannot read many of the CSRs on the LCA,
+ * one of which happens to be the HAE, we save the value that
+ * the SRM will expect...
+ */
+ if (alpha_using_srm)
+ srm_hae = 0x80000000UL;
+}
+
+/*
+ * Constants used during machine-check handling. I suppose these
+ * could be moved into lca.h but I don't see much reason why anybody
+ * else would want to use them.
+ */
+
+#define ESR_EAV (1UL<< 0) /* error address valid */
+#define ESR_CEE (1UL<< 1) /* correctable error */
+#define ESR_UEE (1UL<< 2) /* uncorrectable error */
+#define ESR_WRE (1UL<< 3) /* write-error */
+#define ESR_SOR (1UL<< 4) /* error source */
+#define ESR_CTE (1UL<< 7) /* cache-tag error */
+#define ESR_MSE (1UL<< 9) /* multiple soft errors */
+#define ESR_MHE (1UL<<10) /* multiple hard errors */
+#define ESR_NXM (1UL<<12) /* non-existent memory */
+
+#define IOC_ERR ( 1<<4) /* ioc logs an error */
+#define IOC_CMD_SHIFT 0
+#define IOC_CMD (0xf<<IOC_CMD_SHIFT)
+#define IOC_CODE_SHIFT 8
+#define IOC_CODE (0xf<<IOC_CODE_SHIFT)
+#define IOC_LOST ( 1<<5)
+#define IOC_P_NBR ((__u32) ~((1<<13) - 1))
+
+static void
+mem_error(unsigned long esr, unsigned long ear)
+{
+ printk(" %s %s error to %s occurred at address %x\n",
+ ((esr & ESR_CEE) ? "Correctable" :
+ (esr & ESR_UEE) ? "Uncorrectable" : "A"),
+ (esr & ESR_WRE) ? "write" : "read",
+ (esr & ESR_SOR) ? "memory" : "b-cache",
+ (unsigned) (ear & 0x1ffffff8));
+ if (esr & ESR_CTE) {
+ printk(" A b-cache tag parity error was detected.\n");
+ }
+ if (esr & ESR_MSE) {
+ printk(" Several other correctable errors occurred.\n");
+ }
+ if (esr & ESR_MHE) {
+ printk(" Several other uncorrectable errors occurred.\n");
+ }
+ if (esr & ESR_NXM) {
+ printk(" Attempted to access non-existent memory.\n");
+ }
+}
+
+static void
+ioc_error(__u32 stat0, __u32 stat1)
+{
+ static const char * const pci_cmd[] = {
+ "Interrupt Acknowledge", "Special", "I/O Read", "I/O Write",
+ "Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd3",
+ "Rsvd4", "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address", "Memory Read Line",
+ "Memory Write and Invalidate"
+ };
+ static const char * const err_name[] = {
+ "exceeded retry limit", "no device", "bad data parity",
+ "target abort", "bad address parity", "page table read error",
+ "invalid page", "data error"
+ };
+ unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT;
+ unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT;
+
+ printk(" %s initiated PCI %s cycle to address %x"
+ " failed due to %s.\n",
+ code > 3 ? "PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]);
+
+ if (code == 5 || code == 6) {
+ printk(" (Error occurred at PCI memory address %x.)\n",
+ (stat0 & ~IOC_P_NBR));
+ }
+ if (stat0 & IOC_LOST) {
+ printk(" Other PCI errors occurred simultaneously.\n");
+ }
+}
+
+void
+lca_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs)
+{
+ const char * reason;
+ union el_lca el;
+
+ el.c = (struct el_common *) la_ptr;
+
+ wrmces(rdmces()); /* reset machine check pending flag */
+
+ printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n",
+ vector, regs->pc, (unsigned int) el.c->code);
+
+ /*
+ * The first quadword after the common header always seems to
+	 * be the machine check reason; it is unclear why this isn't
+	 * part of the common header instead.  In the case of a long
+	 * logout frame, the upper 32 bits are the machine check
+ * revision level, which we ignore for now.
+ */
+ switch ((unsigned int) el.c->code) {
+ case MCHK_K_TPERR: reason = "tag parity error"; break;
+ case MCHK_K_TCPERR: reason = "tag control parity error"; break;
+ case MCHK_K_HERR: reason = "access to non-existent memory"; break;
+ case MCHK_K_ECC_C: reason = "correctable ECC error"; break;
+ case MCHK_K_ECC_NC: reason = "non-correctable ECC error"; break;
+ case MCHK_K_CACKSOFT: reason = "MCHK_K_CACKSOFT"; break;
+ case MCHK_K_BUGCHECK: reason = "illegal exception in PAL mode"; break;
+ case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break;
+ case MCHK_K_DCPERR: reason = "d-cache parity error"; break;
+ case MCHK_K_ICPERR: reason = "i-cache parity error"; break;
+ case MCHK_K_SIO_SERR: reason = "SIO SERR occurred on PCI bus"; break;
+ case MCHK_K_SIO_IOCHK: reason = "SIO IOCHK occurred on ISA bus"; break;
+ case MCHK_K_DCSR: reason = "MCHK_K_DCSR"; break;
+ case MCHK_K_UNKNOWN:
+ default: reason = "unknown"; break;
+ }
+
+ switch (el.c->size) {
+ case sizeof(struct el_lca_mcheck_short):
+ printk(KERN_CRIT
+ " Reason: %s (short frame%s, dc_stat=%#lx):\n",
+ reason, el.c->retry ? ", retryable" : "",
+ el.s->dc_stat);
+ if (el.s->esr & ESR_EAV) {
+ mem_error(el.s->esr, el.s->ear);
+ }
+ if (el.s->ioc_stat0 & IOC_ERR) {
+ ioc_error(el.s->ioc_stat0, el.s->ioc_stat1);
+ }
+ break;
+
+ case sizeof(struct el_lca_mcheck_long):
+ printk(KERN_CRIT " Reason: %s (long frame%s):\n",
+ reason, el.c->retry ? ", retryable" : "");
+ printk(KERN_CRIT
+ " reason: %#lx exc_addr: %#lx dc_stat: %#lx\n",
+ el.l->pt[0], el.l->exc_addr, el.l->dc_stat);
+ printk(KERN_CRIT " car: %#lx\n", el.l->car);
+ if (el.l->esr & ESR_EAV) {
+ mem_error(el.l->esr, el.l->ear);
+ }
+ if (el.l->ioc_stat0 & IOC_ERR) {
+ ioc_error(el.l->ioc_stat0, el.l->ioc_stat1);
+ }
+ break;
+
+ default:
+ printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size);
+ }
+
+ /* Dump the logout area to give all info. */
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ unsigned long * ptr = (unsigned long *) la_ptr;
+ long i;
+ for (i = 0; i < el.c->size / sizeof(long); i += 2) {
+ printk(KERN_CRIT " +%8lx %016lx %016lx\n",
+ i*sizeof(long), ptr[i], ptr[i+1]);
+ }
+ }
+#endif /* CONFIG_VERBOSE_MCHECK */
+}
+
+/*
+ * The following routines support the clock-speed changes needed to
+ * manage the thermal problem on the AlphaBook1.
+ */
+
+void
+lca_clock_print(void)
+{
+ long pmr_reg;
+
+ pmr_reg = LCA_READ_PMR;
+
+ printk("Status of clock control:\n");
+ printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg));
+ printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg));
+ printk("\tInterrupt override is %s\n",
+ (pmr_reg & LCA_PMR_INTO) ? "on" : "off");
+ printk("\tDMA override is %s\n",
+ (pmr_reg & LCA_PMR_DMAO) ? "on" : "off");
+}
+
+int
+lca_get_clock(void)
+{
+ long pmr_reg;
+
+ pmr_reg = LCA_READ_PMR;
+	return LCA_GET_PRIMARY(pmr_reg);
+}
+
+void
+lca_clock_fiddle(int divisor)
+{
+ long pmr_reg;
+
+ pmr_reg = LCA_READ_PMR;
+ LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor);
+ /* lca_norm_clock = divisor; */
+ LCA_WRITE_PMR(pmr_reg);
+ mb();
+}
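+
+/*
+ * Illustrative sketch (editor's note): a hypothetical thermal-policy
+ * caller for the routines above.  Nothing below exists in the tree;
+ * it only shows how the divisor interface composes.
+ */
+#if 0
+static void alphabook1_throttle(int too_hot)
+{
+	static int saved_divisor = 1;
+
+	if (too_hot) {
+		saved_divisor = lca_get_clock();
+		lca_clock_fiddle(saved_divisor * 2);	/* halve the speed */
+	} else {
+		lca_clock_fiddle(saved_divisor);	/* restore */
+	}
+}
+#endif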
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
new file mode 100644
index 000000000000..44866cb26a80
--- /dev/null
+++ b/arch/alpha/kernel/core_marvel.c
@@ -0,0 +1,1154 @@
+/*
+ * linux/arch/alpha/kernel/core_marvel.c
+ *
+ * Code common to all Marvel based systems.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_marvel.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+#include <asm/gct.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/rtc.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * Debug helpers
+ */
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Private data
+ */
+static struct io7 *io7_head = NULL;
+
+
+/*
+ * Helper functions
+ */
+static unsigned long __attribute__ ((unused))
+read_ev7_csr(int pe, unsigned long offset)
+{
+ ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
+ unsigned long q;
+
+ mb();
+ q = ev7csr->csr;
+ mb();
+
+ return q;
+}
+
+static void __attribute__ ((unused))
+write_ev7_csr(int pe, unsigned long offset, unsigned long q)
+{
+ ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
+
+ mb();
+ ev7csr->csr = q;
+ mb();
+}
+
+static char * __init
+mk_resource_name(int pe, int port, char *str)
+{
+ char tmp[80];
+ char *name;
+
+ sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
+ name = alloc_bootmem(strlen(tmp) + 1);
+ strcpy(name, tmp);
+
+ return name;
+}
+
+inline struct io7 *
+marvel_next_io7(struct io7 *prev)
+{
+ return (prev ? prev->next : io7_head);
+}
+
+struct io7 *
+marvel_find_io7(int pe)
+{
+ struct io7 *io7;
+
+ for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next)
+ continue;
+
+ return io7;
+}
+
+static struct io7 * __init
+alloc_io7(unsigned int pe)
+{
+ struct io7 *io7;
+ struct io7 *insp;
+ int h;
+
+ if (marvel_find_io7(pe)) {
+ printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
+ return NULL;
+ }
+
+ io7 = alloc_bootmem(sizeof(*io7));
+ io7->pe = pe;
+ spin_lock_init(&io7->irq_lock);
+
+ for (h = 0; h < 4; h++) {
+ io7->ports[h].io7 = io7;
+ io7->ports[h].port = h;
+ io7->ports[h].enabled = 0; /* default to disabled */
+ }
+
+ /*
+ * Insert in pe sorted order.
+ */
+ if (NULL == io7_head) /* empty list */
+ io7_head = io7;
+ else if (io7_head->pe > io7->pe) { /* insert at head */
+ io7->next = io7_head;
+ io7_head = io7;
+ } else { /* insert at position */
+ for (insp = io7_head; insp; insp = insp->next) {
+ if (insp->pe == io7->pe) {
+ printk(KERN_ERR "Too many IO7s at PE %d\n",
+ io7->pe);
+ return NULL;
+ }
+
+ if (NULL == insp->next ||
+ insp->next->pe > io7->pe) { /* insert here */
+ io7->next = insp->next;
+ insp->next = io7;
+ break;
+ }
+ }
+
+ if (NULL == insp) { /* couldn't insert ?!? */
+			printk(KERN_WARNING "Failed to insert IO7 at PE %d"
+			       " - adding at head of list\n", io7->pe);
+ io7->next = io7_head;
+ io7_head = io7;
+ }
+ }
+
+ return io7;
+}
+
+void
+io7_clear_errors(struct io7 *io7)
+{
+ io7_port7_csrs *p7csrs;
+ io7_ioport_csrs *csrs;
+ int port;
+
+
+ /*
+ * First the IO ports.
+ */
+ for (port = 0; port < 4; port++) {
+ csrs = IO7_CSRS_KERN(io7->pe, port);
+
+ csrs->POx_ERR_SUM.csr = -1UL;
+ csrs->POx_TLB_ERR.csr = -1UL;
+ csrs->POx_SPL_COMPLT.csr = -1UL;
+ csrs->POx_TRANS_SUM.csr = -1UL;
+ }
+
+ /*
+ * Then the common ones.
+ */
+ p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);
+
+ p7csrs->PO7_ERROR_SUM.csr = -1UL;
+ p7csrs->PO7_UNCRR_SYM.csr = -1UL;
+ p7csrs->PO7_CRRCT_SYM.csr = -1UL;
+}
+
+
+/*
+ * IO7 PCI, PCI/X, AGP configuration.
+ */
+static void __init
+io7_init_hose(struct io7 *io7, int port)
+{
+ static int hose_index = 0;
+
+ struct pci_controller *hose = alloc_pci_controller();
+ struct io7_port *io7_port = &io7->ports[port];
+ io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
+ int i;
+
+ hose->index = hose_index++; /* arbitrary */
+
+ /*
+ * We don't have an isa or legacy hose, but glibc expects to be
+ * able to use the bus == 0 / dev == 0 form of the iobase syscall
+ * to determine information about the i/o system. Since XFree86
+ * relies on glibc's determination to tell whether or not to use
+ * sparse access, we need to point the pci_isa_hose at a real hose
+ * so at least that determination is correct.
+ */
+ if (hose->index == 0)
+ pci_isa_hose = hose;
+
+ io7_port->csrs = csrs;
+ io7_port->hose = hose;
+ hose->sysdata = io7_port;
+
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /*
+ * Base addresses for userland consumption. Since these are going
+ * to be mapped, they are pure physical addresses.
+ */
+ hose->sparse_mem_base = hose->sparse_io_base = 0;
+ hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
+ hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);
+
+ /*
+ * Base addresses and resource ranges for kernel consumption.
+ */
+ hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);
+
+ hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
+ hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
+ hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
+ hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
+ hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n",
+ hose->index);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n",
+ hose->index);
+
+ /*
+ * Save the existing DMA window settings for later restoration.
+ */
+ for (i = 0; i < 4; i++) {
+ io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
+ io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
+ io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
+ }
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB
+ * Window 1 is direct access 1GB at 2GB
+ * Window 2 is scatter-gather (up-to) 1GB at 3GB
+ * Window 3 is disabled
+ */
+
+ /*
+ * TBIA before modifying windows.
+ */
+ marvel_pci_tbi(hose, 0, -1);
+
+ /*
+ * Set up window 0 for scatter-gather 8MB at 8MB.
+ */
+ hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
+ hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa->align_entry = 8; /* cache line boundary */
+ csrs->POx_WBASE[0].csr =
+ hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
+ csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
+ csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);
+
+ /*
+ * Set up window 1 for direct-mapped 1GB at 2GB.
+ */
+ csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
+ csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
+ csrs->POx_TBASE[1].csr = 0;
+
+ /*
+ * Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
+ */
+ hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
+ hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci->align_entry = 8; /* cache line boundary */
+ csrs->POx_WBASE[2].csr =
+ hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
+ csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
+ csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);
+
+ /*
+ * Disable window 3.
+ */
+ csrs->POx_WBASE[3].csr = 0;
+
+ /*
+ * Make sure that the AGP Monster Window is disabled.
+ */
+ csrs->POx_CTRL.csr &= ~(1UL << 61);
+
+#if 1
+ printk("FIXME: disabling master aborts\n");
+ csrs->POx_MSK_HEI.csr &= ~(3UL << 14);
+#endif
+ /*
+ * TBIA after modifying windows.
+ */
+ marvel_pci_tbi(hose, 0, -1);
+}
+
+static void __init
+marvel_init_io7(struct io7 *io7)
+{
+ int i;
+
+ printk("Initializing IO7 at PID %d\n", io7->pe);
+
+ /*
+ * Get the Port 7 CSR pointer.
+ */
+ io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);
+
+ /*
+ * Init this IO7's hoses.
+ */
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
+ if (csrs->POx_CACHE_CTL.csr == 8) {
+ io7->ports[i].enabled = 1;
+ io7_init_hose(io7, i);
+ }
+ }
+}
+
+void
+marvel_io7_present(gct6_node *node)
+{
+ int pe;
+
+ if (node->type != GCT_TYPE_HOSE ||
+ node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
+ return;
+
+ pe = (node->id >> 8) & 0xff;
+ printk("Found an IO7 at PID %d\n", pe);
+
+ alloc_io7(pe);
+}
+
+static void __init
+marvel_init_vga_hose(void)
+{
+#ifdef CONFIG_VGA_HOSE
+ u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
+
+ if (pu64[7] == 3) { /* TERM_TYPE == graphics */
+ struct pci_controller *hose = NULL;
+ int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */
+ struct io7 *io7;
+ int pid, port;
+
+ /* FIXME - encoding is going to have to change for Marvel
+ * since hose will be able to overflow a byte...
+ * need to fix this decode when the console
+ * changes its encoding
+ */
+ printk("console graphics is on hose %d (console)\n", h);
+
+ /*
+ * The console's hose numbering is:
+ *
+ * hose<n:2>: PID
+ * hose<1:0>: PORT
+ *
+ * We need to find the hose at that pid and port
+ */
+ pid = h >> 2;
+ port = h & 3;
+ if ((io7 = marvel_find_io7(pid)))
+ hose = io7->ports[port].hose;
+
+ if (hose) {
+ printk("Console graphics on hose %d\n", hose->index);
+ pci_vga_hose = hose;
+ }
+ }
+#endif /* CONFIG_VGA_HOSE */
+}
+
+gct6_search_struct gct_wanted_node_list[] = {
+ { GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
+ { 0, 0, NULL }
+};
+
+/*
+ * In case the GCT is not complete, let the user specify PIDs with IO7s
+ * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal)
+ * where IO7s are connected
+ */
+static int __init
+marvel_specify_io7(char *str)
+{
+ unsigned long pid;
+ struct io7 *io7;
+ char *pchar;
+
+ do {
+ pid = simple_strtoul(str, &pchar, 0);
+ if (pchar != str) {
+ printk("User-specified IO7 at PID %lu\n", pid);
+ io7 = alloc_io7(pid);
+ if (io7) marvel_init_io7(io7);
+ }
+
+ if (pchar == str) pchar++;
+ str = pchar;
+ } while(*str);
+
+ return 0;
+}
+__setup("io7=", marvel_specify_io7);
+
+void __init
+marvel_init_arch(void)
+{
+ struct io7 *io7;
+
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* PCI DMA Direct Mapping is 1GB at 2GB. */
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x40000000;
+
+ /* Parse the config tree. */
+ gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list);
+
+ /* Init the io7s. */
+ for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); )
+ marvel_init_io7(io7);
+
+ /* Check for graphic console location (if any). */
+ marvel_init_vga_hose();
+}
+
+void
+marvel_kill_arch(int mode)
+{
+}
+
+
+/*
+ * PCI Configuration Space access functions
+ *
+ * Configuration space addresses have the following format:
+ *
+ * |2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * |3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * n:24 reserved for hose base
+ *	23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * IO7 determines whether to use a type 0 or type 1 config cycle
+ * based on the bus number. Therefore the bus number must be set
+ * to 0 for the root bus on any hose.
+ *
+ *	The function number selects which function of a multi-function
+ *	device (e.g., a combined SCSI/Ethernet card) is addressed.
+ *
+ */
+
+static inline unsigned long
+build_conf_addr(struct pci_controller *hose, u8 bus,
+ unsigned int devfn, int where)
+{
+ return (hose->config_space_base | (bus << 16) | (devfn << 8) | where);
+}
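+
+/*
+ * Illustrative sketch (editor's note): reading the Device ID
+ * (where = 0x02) of bus 1, device 4, function 0 on a hose gives
+ *
+ *	addr = config_space_base | (1 << 16)
+ *	     | (PCI_DEVFN(4, 0) << 8) | 0x02
+ *	     = config_space_base | 0x12002
+ *
+ * IO7 issues a type 1 cycle because the bus number is non-zero.
+ */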
+
+static unsigned long
+mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ struct io7_port *io7_port;
+ unsigned long addr = 0;
+ u8 bus = pbus->number;
+
+ if (!hose)
+ return addr;
+
+ /* Check for enabled. */
+ io7_port = hose->sysdata;
+ if (!io7_port->enabled)
+ return addr;
+
+ if (!pbus->parent) { /* No parent means peer PCI bus. */
+ /* Don't support idsel > 20 on primary bus. */
+ if (devfn >= PCI_DEVFN(21, 0))
+ return addr;
+ bus = 0;
+ }
+
+ addr = build_conf_addr(hose, bus, devfn, where);
+
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return addr;
+}
+
+static int
+marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+
+ if (0 == (addr = mk_conf_addr(bus, devfn, where)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch(size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+
+ if (0 == (addr = mk_conf_addr(bus, devfn, where)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops marvel_pci_ops =
+{
+ .read = marvel_read_config,
+ .write = marvel_write_config,
+};
+
+
+/*
+ * Other PCI helper functions.
+ */
+void
+marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;
+
+ wmb();
+ csrs->POx_SG_TBIA.csr = 0;
+ mb();
+ csrs->POx_SG_TBIA.csr;
+}
+
+
+
+/*
+ * RTC Support
+ */
+struct marvel_rtc_access_info {
+ unsigned long function;
+ unsigned long index;
+ unsigned long data;
+};
+
+static void
+__marvel_access_rtc(void *info)
+{
+ struct marvel_rtc_access_info *rtc_access = info;
+
+ register unsigned long __r0 __asm__("$0");
+ register unsigned long __r16 __asm__("$16") = rtc_access->function;
+ register unsigned long __r17 __asm__("$17") = rtc_access->index;
+ register unsigned long __r18 __asm__("$18") = rtc_access->data;
+
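+	/* PALcode cserve convention: function code in $16, index in
+	   $17, data in $18; the result is returned in $0. */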
+ __asm__ __volatile__(
+ "call_pal %4 # cserve rtc"
+ : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
+ : "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
+ : "$1", "$22", "$23", "$24", "$25");
+
+ rtc_access->data = __r0;
+}
+
+static u8
+__marvel_rtc_io(u8 b, unsigned long addr, int write)
+{
+ static u8 index = 0;
+
+ struct marvel_rtc_access_info rtc_access;
+ u8 ret = 0;
+
+ switch(addr) {
+ case 0x70: /* RTC_PORT(0) */
+ if (write) index = b;
+ ret = index;
+ break;
+
+ case 0x71: /* RTC_PORT(1) */
+ rtc_access.index = index;
+ rtc_access.data = BCD_TO_BIN(b);
+ rtc_access.function = 0x48 + !write; /* GET/PUT_TOY */
+
+#ifdef CONFIG_SMP
+ if (smp_processor_id() != boot_cpuid)
+ smp_call_function_on_cpu(__marvel_access_rtc,
+ &rtc_access, 1, 1,
+ cpumask_of_cpu(boot_cpuid));
+ else
+ __marvel_access_rtc(&rtc_access);
+#else
+ __marvel_access_rtc(&rtc_access);
+#endif
+ ret = BIN_TO_BCD(rtc_access.data);
+ break;
+
+ default:
+ printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
+ break;
+ }
+
+ return ret;
+}
+
+
+/*
+ * IO map support.
+ */
+
+#define __marvel_is_mem_vga(a) (((a) >= 0xa0000) && ((a) <= 0xc0000))
+
+void __iomem *
+marvel_ioremap(unsigned long addr, unsigned long size)
+{
+ struct pci_controller *hose;
+ unsigned long baddr, last;
+ struct vm_struct *area;
+ unsigned long vaddr;
+ unsigned long *ptes;
+ unsigned long pfn;
+
+ /*
+ * Adjust the addr.
+ */
+#ifdef CONFIG_VGA_HOSE
+ if (pci_vga_hose && __marvel_is_mem_vga(addr)) {
+ addr += pci_vga_hose->mem_space->start;
+ }
+#endif
+
+ /*
+ * Find the hose.
+ */
+ for (hose = hose_head; hose; hose = hose->next) {
+ if ((addr >> 32) == (hose->mem_space->start >> 32))
+ break;
+ }
+ if (!hose)
+ return NULL;
+
+ /*
+ * We have the hose - calculate the bus limits.
+ */
+ baddr = addr - hose->mem_space->start;
+ last = baddr + size - 1;
+
+ /*
+ * Is it direct-mapped?
+ */
+ if ((baddr >= __direct_map_base) &&
+ ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
+ addr = IDENT_ADDR | (baddr - __direct_map_base);
+ return (void __iomem *) addr;
+ }
+
+ /*
+ * Check the scatter-gather arena.
+ */
+ if (hose->sg_pci &&
+ baddr >= (unsigned long)hose->sg_pci->dma_base &&
+ last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {
+
+ /*
+ * Adjust the limits (mappings must be page aligned)
+ */
+ baddr -= hose->sg_pci->dma_base;
+ last -= hose->sg_pci->dma_base;
+ baddr &= PAGE_MASK;
+ size = PAGE_ALIGN(last) - baddr;
+
+ /*
+ * Map it.
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ ptes = hose->sg_pci->ptes;
+ for (vaddr = (unsigned long)area->addr;
+ baddr <= last;
+ baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
+ pfn = ptes[baddr >> PAGE_SHIFT];
+ if (!(pfn & 1)) {
+ printk("ioremap failed... pte not valid...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ pfn >>= 1; /* make it a true pfn */
+
+ if (__alpha_remap_area_pages(vaddr,
+ pfn << PAGE_SHIFT,
+ PAGE_SIZE, 0)) {
+ printk("FAILED to map...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ }
+
+ flush_tlb_all();
+
+ vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
+
+ return (void __iomem *) vaddr;
+ }
+
+ return NULL;
+}
+
+void
+marvel_iounmap(volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (addr >= VMALLOC_START)
+ vfree((void *)(PAGE_MASK & addr));
+}
+
+int
+marvel_is_mmio(const volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+
+ if (addr >= VMALLOC_START)
+ return 1;
+ else
+ return (addr & 0xFF000000UL) == 0;
+}
+
+#define __marvel_is_port_vga(a) \
+ (((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
+#define __marvel_is_port_kbd(a) (((a) == 0x60) || ((a) == 0x64))
+#define __marvel_is_port_rtc(a) (((a) == 0x70) || ((a) == 0x71))
+
+void __iomem *marvel_ioportmap (unsigned long addr)
+{
+ if (__marvel_is_port_rtc (addr) || __marvel_is_port_kbd(addr))
+ ;
+#ifdef CONFIG_VGA_HOSE
+ else if (__marvel_is_port_vga (addr) && pci_vga_hose)
+ addr += pci_vga_hose->io_space->start;
+#endif
+ else
+ return NULL;
+ return (void __iomem *)addr;
+}
+
+unsigned int
+marvel_ioread8(void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (__marvel_is_port_kbd(addr))
+ return 0;
+ else if (__marvel_is_port_rtc(addr))
+ return __marvel_rtc_io(0, addr, 0);
+ else
+ return __kernel_ldbu(*(vucp)addr);
+}
+
+void
+marvel_iowrite8(u8 b, void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (__marvel_is_port_kbd(addr))
+ return;
+ else if (__marvel_is_port_rtc(addr))
+ __marvel_rtc_io(b, addr, 1);
+ else
+ __kernel_stb(b, *(vucp)addr);
+}
+
+#ifndef CONFIG_ALPHA_GENERIC
+EXPORT_SYMBOL(marvel_ioremap);
+EXPORT_SYMBOL(marvel_iounmap);
+EXPORT_SYMBOL(marvel_is_mmio);
+EXPORT_SYMBOL(marvel_ioportmap);
+EXPORT_SYMBOL(marvel_ioread8);
+EXPORT_SYMBOL(marvel_iowrite8);
+#endif
+
+/*
+ * NUMA Support
+ */
+/*
+ * FIXME - for now each cpu is a node by itself
+ *	-- no real support for striped mode
+ */
+int
+marvel_pa_to_nid(unsigned long pa)
+{
+ int cpuid;
+
+ if ((pa >> 43) & 1) /* I/O */
+ cpuid = (~(pa >> 35) & 0xff);
+ else /* mem */
+ cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));
+
+ return marvel_cpuid_to_nid(cpuid);
+}
+
+int
+marvel_cpuid_to_nid(int cpuid)
+{
+ return cpuid;
+}
+
+unsigned long
+marvel_node_mem_start(int nid)
+{
+ unsigned long pa;
+
+ pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
+ pa <<= 34;
+
+ return pa;
+}
+
+unsigned long
+marvel_node_mem_size(int nid)
+{
+ return 16UL * 1024 * 1024 * 1024; /* 16GB */
+}
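+
+/*
+ * Illustrative sketch (editor's note): the nid <-> address math above,
+ * worked for nid = 5 (binary 00101):
+ *
+ *	pa = (5 & 0x3) | ((5 & (0x1f << 2)) << 1) = 0x1 | 0x8 = 0x9
+ *	pa <<= 34				  = 0x2400000000
+ *
+ * nid bits 1:0 land in pa bits 35:34 and nid bits 6:2 in pa bits
+ * 41:37, matching the decode in marvel_pa_to_nid() above.
+ */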
+
+
+/*
+ * AGP GART Support.
+ */
+#include <linux/agp_backend.h>
+#include <asm/agp_backend.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+struct marvel_agp_aperture {
+ struct pci_iommu_arena *arena;
+ long pg_start;
+ long pg_count;
+};
+
+static int
+marvel_agp_setup(alpha_agp_info *agp)
+{
+ struct marvel_agp_aperture *aper;
+
+ if (!alpha_agpgart_size)
+ return -ENOMEM;
+
+ aper = kmalloc(sizeof(*aper), GFP_KERNEL);
+ if (aper == NULL) return -ENOMEM;
+
+ aper->arena = agp->hose->sg_pci;
+ aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
+ aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
+ aper->pg_count - 1);
+
+ if (aper->pg_start < 0) {
+ printk(KERN_ERR "Failed to reserve AGP memory\n");
+ kfree(aper);
+ return -ENOMEM;
+ }
+
+ agp->aperture.bus_base =
+ aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
+ agp->aperture.size = aper->pg_count * PAGE_SIZE;
+ agp->aperture.sysdata = aper;
+
+ return 0;
+}
+
+static void
+marvel_agp_cleanup(alpha_agp_info *agp)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ int status;
+
+ status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
+ if (status == -EBUSY) {
+ printk(KERN_WARNING
+ "Attempted to release bound AGP memory - unbinding\n");
+ iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
+ status = iommu_release(aper->arena, aper->pg_start,
+ aper->pg_count);
+ }
+ if (status < 0)
+ printk(KERN_ERR "Failed to release AGP memory\n");
+
+ kfree(aper);
+ kfree(agp);
+}
+
+static int
+marvel_agp_configure(alpha_agp_info *agp)
+{
+ io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
+ struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
+ unsigned int new_rate = 0;
+ unsigned long agp_pll;
+
+ /*
+ * Check the requested mode against the PLL setting.
+ * The agpgart_be code has not programmed the card yet,
+ * so we can still tweak mode here.
+ */
+ agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
+ switch(IO7_PLL_RNGB(agp_pll)) {
+ case 0x4: /* 2x only */
+ /*
+ * The PLL is only programmed for 2x, so adjust the
+ * rate to 2x, if necessary.
+ */
+ if (agp->mode.bits.rate != 2)
+ new_rate = 2;
+ break;
+
+ case 0x6: /* 1x / 4x */
+ /*
+ * The PLL is programmed for 1x or 4x. Don't go faster
+ * than requested, so if the requested rate is 2x, use 1x.
+ */
+ if (agp->mode.bits.rate == 2)
+ new_rate = 1;
+ break;
+
+ default: /* ??????? */
+ /*
+		 * We don't know what this PLL setting is; take the
+		 * requested rate, but warn the user.
+ */
+ printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
+ __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll);
+ break;
+ }
+
+ /*
+ * Set the new rate, if necessary.
+ */
+ if (new_rate) {
+ printk("Requested AGP Rate %dX not compatible "
+ "with PLL setting - using %dX\n",
+ agp->mode.bits.rate,
+ new_rate);
+
+ agp->mode.bits.rate = new_rate;
+ }
+
+ printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
+ agp->hose->index, agp->mode.bits.rate,
+ agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);
+
+ csrs->AGP_CMD.csr = agp->mode.lw;
+
+ return 0;
+}
+
+static int
+marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_bind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count, mem->memory);
+}
+
+static int
+marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_unbind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count);
+}
+
+static unsigned long
+marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ unsigned long baddr = addr - aper->arena->dma_base;
+ unsigned long pte;
+
+ if (addr < agp->aperture.bus_base ||
+ addr >= agp->aperture.bus_base + agp->aperture.size) {
+ printk("%s: addr out of range\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
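+	/* Arena PTE format (see marvel_ioremap above): bit 0 is the
+	   valid bit and the PFN sits above it, so a valid PTE
+	   translates as (pte >> 1) << PAGE_SHIFT. */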
+ pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
+ if (!(pte & 1)) {
+ printk("%s: pte not valid\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ return (pte >> 1) << PAGE_SHIFT;
+}
+
+struct alpha_agp_ops marvel_agp_ops =
+{
+ .setup = marvel_agp_setup,
+ .cleanup = marvel_agp_cleanup,
+ .configure = marvel_agp_configure,
+ .bind = marvel_agp_bind_memory,
+ .unbind = marvel_agp_unbind_memory,
+ .translate = marvel_agp_translate
+};
+
+alpha_agp_info *
+marvel_agp_info(void)
+{
+ struct pci_controller *hose;
+ io7_ioport_csrs *csrs;
+ alpha_agp_info *agp;
+ struct io7 *io7;
+
+ /*
+ * Find the first IO7 with an AGP card.
+ *
+	 * FIXME -- there should be a better way (we want to be able to
+	 * specify one, and what if the AGP card is not the video card?)
+ */
+ hose = NULL;
+ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
+ struct pci_controller *h;
+ vuip addr;
+
+ if (!io7->ports[IO7_AGP_PORT].enabled)
+ continue;
+
+ h = io7->ports[IO7_AGP_PORT].hose;
+ addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);
+
+ if (*addr != 0xffffffffu) {
+ hose = h;
+ break;
+ }
+ }
+
+ if (!hose || !hose->sg_pci)
+ return NULL;
+
+ printk("MARVEL - using hose %d as AGP\n", hose->index);
+
+ /*
+ * Get the csrs from the hose.
+ */
+ csrs = ((struct io7_port *)hose->sysdata)->csrs;
+
+ /*
+ * Allocate the info structure.
+ */
+	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
+	if (!agp)
+		return NULL;
+
+ /*
+ * Fill it in.
+ */
+ agp->hose = hose;
+ agp->private = NULL;
+ agp->ops = &marvel_agp_ops;
+
+ /*
+ * Aperture - not configured until ops.setup().
+ */
+ agp->aperture.bus_base = 0;
+ agp->aperture.size = 0;
+ agp->aperture.sysdata = NULL;
+
+ /*
+ * Capabilities.
+ *
+ * NOTE: IO7 reports through AGP_STAT that it can support a read queue
+ * depth of 17 (rq = 0x10). It actually only supports a depth of
+ * 16 (rq = 0xf).
+ */
+ agp->capability.lw = csrs->AGP_STAT.csr;
+ agp->capability.bits.rq = 0xf;
+
+ /*
+ * Mode.
+ */
+ agp->mode.lw = csrs->AGP_CMD.csr;
+
+ return agp;
+}
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
new file mode 100644
index 000000000000..28849c894153
--- /dev/null
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -0,0 +1,618 @@
+/*
+ * linux/arch/alpha/kernel/core_mcpcia.c
+ *
+ * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * Code common to all MCbus-PCI Adaptor core logic chipsets
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_mcpcia.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/ptrace.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CFG 0
+
+#if DEBUG_CFG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+#define MCPCIA_MAX_HOSES 4
+
+/*
+ * Given a bus, device, and function number, compute the resulting
+ * configuration space address and set up the MCPCIA_HAXR2 register
+ * accordingly.  It is therefore not safe to have concurrent
+ * invocations of the configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *	31:11	Device select (one bit per device IDSEL)
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ *	23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ *	The function number selects which function of a multi-function
+ *	device (e.g., a combined SCSI/Ethernet card) is addressed.
+ *
+ *	The register number selects a DWORD (32 bit) register offset.
+ *	It is not shifted left by 2 bits because the bottom two bits
+ *	are simply "dropped" (always zero).
+ */
+
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1,
+ struct pci_controller *hose)
+{
+ unsigned long flags;
+ unsigned long mid = MCPCIA_HOSE2MID(hose->index);
+ unsigned int stat0, value, temp, cpu;
+
+ cpu = smp_processor_id();
+
+ local_irq_save(flags);
+
+	DBG_CFG(("conf_read(addr=0x%lx, type1=%d, mid=%ld)\n",
+		 addr, type1, mid));
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid) = stat0;
+ mb();
+ temp = *(vuip)MCPCIA_CAP_ERR(mid);
+	DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%ld) was 0x%x\n", mid, stat0));
+
+ mb();
+ draina();
+ mcheck_expected(cpu) = 1;
+ mcheck_taken(cpu) = 0;
+ mcheck_extra(cpu) = mid;
+ mb();
+
+ /* Access configuration space. */
+ value = *((vuip)addr);
+ mb();
+ mb(); /* magic */
+
+ if (mcheck_taken(cpu)) {
+ mcheck_taken(cpu) = 0;
+ value = 0xffffffffU;
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ mb();
+
+ DBG_CFG(("conf_read(): finished\n"));
+
+ local_irq_restore(flags);
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1,
+ struct pci_controller *hose)
+{
+ unsigned long flags;
+ unsigned long mid = MCPCIA_HOSE2MID(hose->index);
+ unsigned int stat0, temp, cpu;
+
+ cpu = smp_processor_id();
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
+ temp = *(vuip)MCPCIA_CAP_ERR(mid);
+	DBG_CFG(("conf_write: MCPCIA_CAP_ERR(%ld) was 0x%x\n", mid, stat0));
+
+ draina();
+ mcheck_expected(cpu) = 1;
+ mcheck_extra(cpu) = mid;
+ mb();
+
+ /* Access configuration space. */
+ *((vuip)addr) = value;
+ mb();
+ mb(); /* magic */
+ temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
+ mcheck_expected(cpu) = 0;
+ mb();
+
+ DBG_CFG(("conf_write(): finished\n"));
+ local_irq_restore(flags);
+}
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where,
+ struct pci_controller *hose, unsigned long *pci_addr,
+ unsigned char *type1)
+{
+ u8 bus = pbus->number;
+ unsigned long addr;
+
+ DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x,"
+ " pci_addr=0x%p, type1=0x%p)\n",
+ bus, devfn, hose->index, where, pci_addr, type1));
+
+ /* Type 1 configuration cycle for *ALL* busses. */
+ *type1 = 1;
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ addr = (bus << 16) | (devfn << 8) | (where);
+ addr <<= 5; /* swizzle for SPARSE */
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ struct pci_controller *hose = bus->sysdata;
+ unsigned long addr, w;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ addr |= (size - 1) * 8;
+ w = conf_read(addr, type1, hose);
+ switch (size) {
+ case 1:
+ *value = __kernel_extbl(w, where & 3);
+ break;
+ case 2:
+ *value = __kernel_extwl(w, where & 3);
+ break;
+ case 4:
+ *value = w;
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ struct pci_controller *hose = bus->sysdata;
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ addr |= (size - 1) * 8;
+ value = __kernel_insql(value, where & 3);
+ conf_write(addr, value, type1, hose);
+ return PCIBIOS_SUCCESSFUL;
+}
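+
+/*
+ * Illustrative sketch (editor's note): for bus 0, device 3, function 0,
+ * where = 0x00, size = 4 the routines above produce
+ *
+ *	addr  = ((0 << 16) | (PCI_DEVFN(3, 0) << 8) | 0x00) << 5
+ *	      = 0x1800 << 5 = 0x30000, OR'ed with config_space_base;
+ *	addr |= (4 - 1) * 8 = 0x18	(longword length encoding)
+ *
+ * As in the LCA code earlier in this patch, the access size is encoded
+ * in the low sparse-space address bits.
+ */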
+
+struct pci_ops mcpcia_pci_ops =
+{
+ .read = mcpcia_read_config,
+ .write = mcpcia_write_config,
+};
+
+void
+mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0;
+ mb();
+}
+
+static int __init
+mcpcia_probe_hose(int h)
+{
+ int cpu = smp_processor_id();
+ int mid = MCPCIA_HOSE2MID(h);
+ unsigned int pci_rev;
+
+ /* Gotta be REAL careful. If hose is absent, we get an mcheck. */
+
+ mb();
+ mb();
+ draina();
+ wrmces(7);
+
+ mcheck_expected(cpu) = 2; /* indicates probing */
+ mcheck_taken(cpu) = 0;
+ mcheck_extra(cpu) = mid;
+ mb();
+
+ /* Access the bus revision word. */
+ pci_rev = *(vuip)MCPCIA_REV(mid);
+
+ mb();
+ mb(); /* magic */
+ if (mcheck_taken(cpu)) {
+ mcheck_taken(cpu) = 0;
+ pci_rev = 0xffffffff;
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ mb();
+
+ return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST;
+}
+
+static void __init
+mcpcia_new_hose(int h)
+{
+ struct pci_controller *hose;
+ struct resource *io, *mem, *hae_mem;
+ int mid = MCPCIA_HOSE2MID(h);
+
+ hose = alloc_pci_controller();
+ if (h == 0)
+ pci_isa_hose = hose;
+ io = alloc_resource();
+ mem = alloc_resource();
+ hae_mem = alloc_resource();
+
+ hose->io_space = io;
+ hose->mem_space = hae_mem;
+ hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR;
+ hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR;
+ hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR;
+ hose->dense_io_base = 0;
+ hose->config_space_base = MCPCIA_CONF(mid);
+ hose->index = h;
+
+ io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS;
+ io->end = io->start + 0xffff;
+ io->name = pci_io_names[h];
+ io->flags = IORESOURCE_IO;
+
+ mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS;
+ mem->end = mem->start + 0xffffffff;
+ mem->name = pci_mem_names[h];
+ mem->flags = IORESOURCE_MEM;
+
+ hae_mem->start = mem->start;
+ hae_mem->end = mem->start + MCPCIA_MEM_MASK;
+ hae_mem->name = pci_hae0_name;
+ hae_mem->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, io) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n", h);
+ if (request_resource(&iomem_resource, mem) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", h);
+ if (request_resource(mem, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h);
+}
+
+static void
+mcpcia_pci_clr_err(int mid)
+{
+ *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */
+ mb();
+	*(vuip)MCPCIA_CAP_ERR(mid); /* Re-read to force the write. */
+}
+
+static void __init
+mcpcia_startup_hose(struct pci_controller *hose)
+{
+ int mid = MCPCIA_HOSE2MID(hose->index);
+ unsigned int tmp;
+
+ mcpcia_pci_clr_err(mid);
+
+ /*
+ * Set up error reporting.
+ */
+ tmp = *(vuip)MCPCIA_CAP_ERR(mid);
+ tmp |= 0x0006; /* master/target abort */
+ *(vuip)MCPCIA_CAP_ERR(mid) = tmp;
+ mb();
+ tmp = *(vuip)MCPCIA_CAP_ERR(mid);
+
+ /*
+ * Set up the PCI->physical memory translation windows.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci)
+ * Window 2 is direct access 2GB at 2GB
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = iommu_arena_new(hose, 0x40000000,
+ size_for_memory(0x40000000), 0);
+
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
+ *(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3;
+ *(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8;
+
+ *(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3;
+ *(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8;
+
+ *(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1;
+ *(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T2_BASE(mid) = 0;
+
+ *(vuip)MCPCIA_W3_BASE(mid) = 0x0;
+
+ mcpcia_pci_tbi(hose, 0, -1);
+
+ *(vuip)MCPCIA_HBASE(mid) = 0x0;
+ mb();
+
+ *(vuip)MCPCIA_HAE_MEM(mid) = 0U;
+ mb();
+ *(vuip)MCPCIA_HAE_MEM(mid); /* read it back. */
+ *(vuip)MCPCIA_HAE_IO(mid) = 0;
+ mb();
+ *(vuip)MCPCIA_HAE_IO(mid); /* read it back. */
+}
+
+void __init
+mcpcia_init_arch(void)
+{
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* Allocate hose 0. That's the one that all the ISA junk hangs
+ off of, from which we'll be registering stuff here in a bit.
+ Other hose detection is done in mcpcia_init_hoses, which is
+ called from init_IRQ. */
+
+ mcpcia_new_hose(0);
+}
+
+/* This is called from init_IRQ, since we cannot take interrupts
+   before then, which means we cannot do this in init_arch. */
+
+void __init
+mcpcia_init_hoses(void)
+{
+ struct pci_controller *hose;
+ int hose_count;
+ int h;
+
+ /* First, find how many hoses we have. */
+ hose_count = 0;
+ for (h = 0; h < MCPCIA_MAX_HOSES; ++h) {
+ if (mcpcia_probe_hose(h)) {
+ if (h != 0)
+ mcpcia_new_hose(h);
+ hose_count++;
+ }
+ }
+
+ printk("mcpcia_init_hoses: found %d hoses\n", hose_count);
+
+ /* Now do init for each hose. */
+ for (hose = hose_head; hose; hose = hose->next)
+ mcpcia_startup_hose(hose);
+}
+
+static void
+mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout)
+{
+ struct el_common_EV5_uncorrectable_mcheck *frame;
+ int i;
+
+ frame = &logout->procdata;
+
+ /* Print PAL fields */
+ for (i = 0; i < 24; i += 2) {
+ printk(" paltmp[%d-%d] = %16lx %16lx\n",
+ i, i+1, frame->paltemp[i], frame->paltemp[i+1]);
+ }
+ for (i = 0; i < 8; i += 2) {
+ printk(" shadow[%d-%d] = %16lx %16lx\n",
+ i, i+1, frame->shadow[i],
+ frame->shadow[i+1]);
+ }
+ printk(" Addr of excepting instruction = %16lx\n",
+ frame->exc_addr);
+ printk(" Summary of arithmetic traps = %16lx\n",
+ frame->exc_sum);
+ printk(" Exception mask = %16lx\n",
+ frame->exc_mask);
+ printk(" Base address for PALcode = %16lx\n",
+ frame->pal_base);
+ printk(" Interrupt Status Reg = %16lx\n",
+ frame->isr);
+ printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n",
+ frame->icsr);
+ printk(" I-CACHE Reg %s parity error = %16lx\n",
+ (frame->ic_perr_stat & 0x800L) ?
+ "Data" : "Tag",
+ frame->ic_perr_stat);
+ printk(" D-CACHE error Reg = %16lx\n",
+ frame->dc_perr_stat);
+ if (frame->dc_perr_stat & 0x2) {
+ switch (frame->dc_perr_stat & 0x03c) {
+ case 8:
+ printk(" Data error in bank 1\n");
+ break;
+ case 4:
+ printk(" Data error in bank 0\n");
+ break;
+ case 20:
+ printk(" Tag error in bank 1\n");
+ break;
+ case 10:
+ printk(" Tag error in bank 0\n");
+ break;
+ }
+ }
+ printk(" Effective VA = %16lx\n",
+ frame->va);
+ printk(" Reason for D-stream = %16lx\n",
+ frame->mm_stat);
+ printk(" EV5 SCache address = %16lx\n",
+ frame->sc_addr);
+ printk(" EV5 SCache TAG/Data parity = %16lx\n",
+ frame->sc_stat);
+ printk(" EV5 BC_TAG_ADDR = %16lx\n",
+ frame->bc_tag_addr);
+ printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n",
+ frame->ei_addr);
+ printk(" Fill Syndrome = %16lx\n",
+ frame->fill_syndrome);
+ printk(" EI_STAT reg = %16lx\n",
+ frame->ei_stat);
+ printk(" LD_LOCK = %16lx\n",
+ frame->ld_lock);
+}
+
+static void
+mcpcia_print_system_area(unsigned long la_ptr)
+{
+ struct el_common *frame;
+ struct pci_controller *hose;
+
+ struct IOD_subpacket {
+ unsigned long base;
+ unsigned int whoami;
+ unsigned int rsvd1;
+ unsigned int pci_rev;
+ unsigned int cap_ctrl;
+ unsigned int hae_mem;
+ unsigned int hae_io;
+ unsigned int int_ctl;
+ unsigned int int_reg;
+ unsigned int int_mask0;
+ unsigned int int_mask1;
+ unsigned int mc_err0;
+ unsigned int mc_err1;
+ unsigned int cap_err;
+ unsigned int rsvd2;
+ unsigned int pci_err1;
+ unsigned int mdpa_stat;
+ unsigned int mdpa_syn;
+ unsigned int mdpb_stat;
+ unsigned int mdpb_syn;
+ unsigned int rsvd3;
+ unsigned int rsvd4;
+ unsigned int rsvd5;
+ } *iodpp;
+
+ frame = (struct el_common *)la_ptr;
+ iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset);
+
+ for (hose = hose_head; hose; hose = hose->next, iodpp++) {
+
+ printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n",
+ hose->index, iodpp->base);
+ printk(" WHOAMI = %8x\n", iodpp->whoami);
+ printk(" PCI_REV = %8x\n", iodpp->pci_rev);
+ printk(" CAP_CTRL = %8x\n", iodpp->cap_ctrl);
+ printk(" HAE_MEM = %8x\n", iodpp->hae_mem);
+ printk(" HAE_IO = %8x\n", iodpp->hae_io);
+ printk(" INT_CTL = %8x\n", iodpp->int_ctl);
+ printk(" INT_REG = %8x\n", iodpp->int_reg);
+ printk(" INT_MASK0 = %8x\n", iodpp->int_mask0);
+ printk(" INT_MASK1 = %8x\n", iodpp->int_mask1);
+ printk(" MC_ERR0 = %8x\n", iodpp->mc_err0);
+ printk(" MC_ERR1 = %8x\n", iodpp->mc_err1);
+ printk(" CAP_ERR = %8x\n", iodpp->cap_err);
+ printk(" PCI_ERR1 = %8x\n", iodpp->pci_err1);
+ printk(" MDPA_STAT = %8x\n", iodpp->mdpa_stat);
+ printk(" MDPA_SYN = %8x\n", iodpp->mdpa_syn);
+ printk(" MDPB_STAT = %8x\n", iodpp->mdpb_stat);
+ printk(" MDPB_SYN = %8x\n", iodpp->mdpb_syn);
+ }
+}
+
+void
+mcpcia_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ struct el_common *mchk_header;
+ struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
+ unsigned int cpu = smp_processor_id();
+ int expected;
+
+ mchk_header = (struct el_common *)la_ptr;
+ mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
+ expected = mcheck_expected(cpu);
+
+ mb();
+ mb(); /* magic */
+ draina();
+
+ switch (expected) {
+ case 0:
+ {
+ /* FIXME: how do we figure out which hose the
+ error was on? */
+ struct pci_controller *hose;
+ for (hose = hose_head; hose; hose = hose->next)
+ mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index));
+ break;
+ }
+ case 1:
+ mcpcia_pci_clr_err(mcheck_extra(cpu));
+ break;
+ default:
+ /* Otherwise, we're being called from mcpcia_probe_hose
+		   and there's no hose to clear an error from. */
+ break;
+ }
+
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "MCPCIA", expected != 0);
+ if (!expected && vector != 0x620 && vector != 0x630) {
+ mcpcia_print_uncorrectable(mchk_logout);
+ mcpcia_print_system_area(la_ptr);
+ }
+}
diff --git a/arch/alpha/kernel/core_polaris.c b/arch/alpha/kernel/core_polaris.c
new file mode 100644
index 000000000000..277674a500ff
--- /dev/null
+++ b/arch/alpha/kernel/core_polaris.c
@@ -0,0 +1,203 @@
+/*
+ * linux/arch/alpha/kernel/core_polaris.c
+ *
+ * POLARIS chip-specific code
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_polaris.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address. This is fairly straightforward
+ * on POLARIS, since the chip itself generates Type 0 or Type 1
+ * cycles automatically depending on the bus number (Bus 0 is
+ * hardwired to Type 0, all others are Type 1. Peer bridges
+ * are not supported).
+ *
+ * All types:
+ *
+ * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *	23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ *	The function number selects which function of a multi-function
+ *	device (e.g., a combined SCSI/Ethernet card) is addressed.
+ *
+ *	The register number selects a DWORD (32 bit) register offset.
+ *	It is not shifted left by 2 bits because the bottom two bits
+ *	are simply "dropped" (always zero).
+ */
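+
+/*
+ * For example, a config access to bus 0, device 7, function 0,
+ * register 0x10 (illustrative values) computes:
+ *
+ *	device_fn = (7 << 3) | 0 = 0x38
+ *	pci_addr  = (0 << 16) | (0x38 << 8) | 0x10 = 0x3810,
+ *
+ * ORed with POLARIS_DENSE_CONFIG_BASE.
+ */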
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, u8 *type1)
+{
+ u8 bus = pbus->number;
+
+ *type1 = (bus == 0) ? 0 : 1;
+ *pci_addr = (bus << 16) | (device_fn << 8) | (where) |
+ POLARIS_DENSE_CONFIG_BASE;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
+ " returning address 0x%p\n"
+ bus, device_fn, where, *pci_addr));
+
+ return 0;
+}
+
+static int
+polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int
+polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
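+	/* Note the dummy load after each store below: it forces the
+	   posted write out to the PCI bus before returning. */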
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops polaris_pci_ops =
+{
+ .read = polaris_read_config,
+ .write = polaris_write_config,
+};
+
+void __init
+polaris_init_arch(void)
+{
+ struct pci_controller *hose;
+
+ /* May need to initialize error reporting (see PCICTL0/1), but
+ * for now assume that the firmware has done the right thing
+ * already.
+ */
+#if 0
+ printk("polaris_init_arch(): trusting firmware for setup\n");
+#endif
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = 0;
+ hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR;
+ hose->sparse_io_base = 0;
+ hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+
+ /* The I/O window is fixed at 2G @ 2G. */
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+}
+
+static inline void
+polaris_pci_clr_err(void)
+{
+ *(vusp)POLARIS_W_STATUS;
+ /* Write 1's to settable bits to clear errors */
+ *(vusp)POLARIS_W_STATUS = 0x7800;
+ mb();
+ *(vusp)POLARIS_W_STATUS;
+}
+
+void
+polaris_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ /* Clear the error before any reporting. */
+ mb();
+ mb();
+ draina();
+ polaris_pci_clr_err();
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "POLARIS",
+ mcheck_expected(0));
+}
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
new file mode 100644
index 000000000000..ecce09e3626a
--- /dev/null
+++ b/arch/alpha/kernel/core_t2.c
@@ -0,0 +1,622 @@
+/*
+ * linux/arch/alpha/kernel/core_t2.c
+ *
+ * Written by Jay A Estabrook (jestabro@amt.tay1.dec.com).
+ * December 1996.
+ *
+ * based on CIA code by David A Rusling (david.rusling@reo.mts.dec.com)
+ *
+ * Code common to all T2 core logic chips.
+ */
+
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/core_t2.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/delay.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/* For dumping initial DMA window settings. */
+#define DEBUG_PRINT_INITIAL_SETTINGS 0
+
+/* For dumping final DMA window settings. */
+#define DEBUG_PRINT_FINAL_SETTINGS 0
+
+/*
+ * By default, we direct-map starting at 2GB, in order to allow the
+ * maximum size direct-map window (2GB) to match the maximum amount of
+ * memory (2GB) that can be present on SABLEs. But that limits the
+ * floppy to DMA only via the scatter/gather window set up for 8MB
+ * ISA DMA, since the maximum ISA DMA address is 2GB-1.
+ *
+ * For now, this seems a reasonable trade-off: even though most SABLEs
+ * have less than 1GB of memory, floppy usage/performance will not
+ * really be affected by forcing it to go via scatter/gather...
+ */
+#define T2_DIRECTMAP_2G 1
+
+#if T2_DIRECTMAP_2G
+# define T2_DIRECTMAP_START 0x80000000UL
+# define T2_DIRECTMAP_LENGTH 0x80000000UL
+#else
+# define T2_DIRECTMAP_START 0x40000000UL
+# define T2_DIRECTMAP_LENGTH 0x40000000UL
+#endif
+
+/* The ISA scatter/gather window settings. */
+#define T2_ISA_SG_START 0x00800000UL
+#define T2_ISA_SG_LENGTH 0x00800000UL
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG(args) printk args
+#else
+# define DBG(args)
+#endif
+
+static volatile unsigned int t2_mcheck_any_expected;
+static volatile unsigned int t2_mcheck_last_taken;
+
+/* Place to save the DMA Window registers as set up by SRM
+ for restoration during shutdown. */
+static struct
+{
+ struct {
+ unsigned long wbase;
+ unsigned long wmask;
+ unsigned long tbase;
+ } window[2];
+ unsigned long hae_1;
+ unsigned long hae_2;
+ unsigned long hae_3;
+ unsigned long hae_4;
+ unsigned long hbase;
+} t2_saved_config __attribute__((common));
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address and setup the T2_HAXR2 register
+ * accordingly. It is therefore not safe to have concurrent
+ * invocations to configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *	31:11	Device select bits.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ *	23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
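+
+/*
+ * For example, a Type 0 cycle to device 3, function 1, register 0x04
+ * (illustrative values) encodes as:
+ *
+ *	addr = (0x0800 << 3) | (1 << 8) | 0x04 = 0x4104
+ *
+ * i.e. one IDSEL line per device, starting at bit 11.
+ */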
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x,"
+ " addr=0x%lx, type1=0x%x)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (bus == 0) {
+ int device = device_fn >> 3;
+
+ /* Type 0 configuration cycle. */
+
+ if (device > 8) {
+ DBG(("mk_conf_addr: device (%d)>20, returning -1\n",
+ device));
+ return -1;
+ }
+
+ *type1 = 0;
+ addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where);
+ } else {
+ /* Type 1 configuration cycle. */
+ *type1 = 1;
+ addr = (bus << 16) | (device_fn << 8) | (where);
+ }
+ *pci_addr = addr;
+ DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+/*
+ * NOTE: both conf_read() and conf_write() may set HAE_3 when needing
+ * to do type1 access. This is protected by the use of spinlock IRQ
+ * primitives in the wrapper functions pci_{read,write}_config_*()
+ * defined in drivers/pci/pci.c.
+ */
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1)
+{
+ unsigned int value, cpu, taken;
+ unsigned long t2_cfg = 0;
+
+ cpu = smp_processor_id();
+
+ DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
+
+ /* If Type1 access, must set T2 CFG. */
+ if (type1) {
+ t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
+ *(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
+ mb();
+ }
+ mb();
+ draina();
+
+ mcheck_expected(cpu) = 1;
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_any_expected |= (1 << cpu);
+ mb();
+
+ /* Access configuration space. */
+ value = *(vuip)addr;
+ mb();
+ mb(); /* magic */
+
+ /* Wait for possible mcheck. Also, this lets other CPUs clear
+ their mchecks as well, as they can reliably tell when
+ another CPU is in the midst of handling a real mcheck via
+ the "taken" function. */
+ udelay(100);
+
+ if ((taken = mcheck_taken(cpu))) {
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_last_taken |= (1 << cpu);
+ value = 0xffffffffU;
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ t2_mcheck_any_expected = 0;
+ mb();
+
+ /* If Type1 access, must reset T2 CFG so normal IO space ops work. */
+ if (type1) {
+ *(vulp)T2_HAE_3 = t2_cfg;
+ mb();
+ }
+
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1)
+{
+ unsigned int cpu, taken;
+ unsigned long t2_cfg = 0;
+
+ cpu = smp_processor_id();
+
+ /* If Type1 access, must set T2 CFG. */
+ if (type1) {
+ t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
+ *(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
+ mb();
+ }
+ mb();
+ draina();
+
+ mcheck_expected(cpu) = 1;
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_any_expected |= (1 << cpu);
+ mb();
+
+ /* Access configuration space. */
+ *(vuip)addr = value;
+ mb();
+ mb(); /* magic */
+
+ /* Wait for possible mcheck. Also, this lets other CPUs clear
+ their mchecks as well, as they can reliably tell when
+ this CPU is in the midst of handling a real mcheck via
+ the "taken" function. */
+ udelay(100);
+
+ if ((taken = mcheck_taken(cpu))) {
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_last_taken |= (1 << cpu);
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ t2_mcheck_any_expected = 0;
+ mb();
+
+ /* If Type1 access, must reset T2 CFG so normal IO space ops work. */
+ if (type1) {
+ *(vulp)T2_HAE_3 = t2_cfg;
+ mb();
+ }
+}
+
+static int
+t2_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ int shift;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
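+	/* T2 config space is sparse: the PCI address is shifted up by
+	   5 and the transfer length is encoded in address bits <4:3>
+	   via "mask" (0 = byte, 8 = word, 24 = longword). */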
+ mask = (size - 1) * 8;
+ shift = (where & 3) * 8;
+ addr = (pci_addr << 5) + mask + T2_CONF;
+ *value = conf_read(addr, type1) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + T2_CONF;
+ conf_write(addr, value << ((where & 3) * 8), type1);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops t2_pci_ops =
+{
+ .read = t2_read_config,
+ .write = t2_write_config,
+};
+
+static void __init
+t2_direct_map_window1(unsigned long base, unsigned long length)
+{
+ unsigned long temp;
+
+ __direct_map_base = base;
+ __direct_map_size = length;
+
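+	/* WBASE1 appears to hold the window base in <31:20> and the
+	   window end in <11:0> (MB granularity): e.g. a 2GB window at
+	   2GB gives 0x80000fff before the ENABLE bit is ORed in. */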
+ temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
+ *(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
+ temp = (length - 1) & 0xfff00000UL;
+ *(vulp)T2_WMASK1 = temp;
+ *(vulp)T2_TBASE1 = 0;
+
+#if DEBUG_PRINT_FINAL_SETTINGS
+ printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
+ __FUNCTION__,
+ *(vulp)T2_WBASE1,
+ *(vulp)T2_WMASK1,
+ *(vulp)T2_TBASE1);
+#endif
+}
+
+static void __init
+t2_sg_map_window2(struct pci_controller *hose,
+ unsigned long base,
+ unsigned long length)
+{
+ unsigned long temp;
+
+ /* Note we can only do 1 SG window, as the other is for direct, so
+ do an ISA SG area, especially for the floppy. */
+ hose->sg_isa = iommu_arena_new(hose, base, length, 0);
+ hose->sg_pci = NULL;
+
+ temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
+ *(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
+ temp = (length - 1) & 0xfff00000UL;
+ *(vulp)T2_WMASK2 = temp;
+ *(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
+ mb();
+
+ t2_pci_tbi(hose, 0, -1); /* flush TLB all */
+
+#if DEBUG_PRINT_FINAL_SETTINGS
+ printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
+ __FUNCTION__,
+ *(vulp)T2_WBASE2,
+ *(vulp)T2_WMASK2,
+ *(vulp)T2_TBASE2);
+#endif
+}
+
+static void __init
+t2_save_configuration(void)
+{
+#if DEBUG_PRINT_INITIAL_SETTINGS
+ printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */
+ printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2);
+ printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3);
+ printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4);
+ printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE);
+
+ printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__,
+ *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
+ printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__,
+ *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
+#endif
+
+ /*
+ * Save the DMA Window registers.
+ */
+ t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1;
+ t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1;
+ t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1;
+ t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2;
+ t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2;
+ t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2;
+
+ t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */
+ t2_saved_config.hae_2 = *(vulp)T2_HAE_2;
+ t2_saved_config.hae_3 = *(vulp)T2_HAE_3;
+ t2_saved_config.hae_4 = *(vulp)T2_HAE_4;
+ t2_saved_config.hbase = *(vulp)T2_HBASE;
+}
+
+void __init
+t2_init_arch(void)
+{
+ struct pci_controller *hose;
+ unsigned long temp;
+ unsigned int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ mcheck_expected(i) = 0;
+ mcheck_taken(i) = 0;
+ }
+ t2_mcheck_any_expected = 0;
+ t2_mcheck_last_taken = 0;
+
+ /* Enable scatter/gather TLB use. */
+ temp = *(vulp)T2_IOCSR;
+ if (!(temp & (0x1UL << 26))) {
+ printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n",
+ temp);
+ *(vulp)T2_IOCSR = temp | (0x1UL << 26);
+ mb();
+ *(vulp)T2_IOCSR; /* read it back to make sure */
+ }
+
+ t2_save_configuration();
+
+ /*
+ * Create our single hose.
+ */
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = T2_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+
+ /*
+ * Set up the PCI->physical memory translation windows.
+ *
+ * Window 1 is direct mapped.
+ * Window 2 is scatter/gather (for ISA).
+ */
+
+ t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH);
+
+ /* Always make an ISA DMA window. */
+ t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH);
+
+ *(vulp)T2_HBASE = 0x0; /* Disable HOLES. */
+
+ /* Zero HAE. */
+ *(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */
+ *(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */
+ *(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */
+
+ /*
+ * We also now zero out HAE_4, the dense memory HAE, so that
+ * we need not account for its "offset" when accessing dense
+ * memory resources which we allocated in our normal way. This
+ * HAE would need to stay untouched were we to keep the SRM
+ * resource settings.
+ *
+ * Thus we can now run standard X servers on SABLE/LYNX. :-)
+ */
+ *(vulp)T2_HAE_4 = 0; mb();
+}
+
+void
+t2_kill_arch(int mode)
+{
+ /*
+ * Restore the DMA Window registers.
+ */
+ *(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase;
+ *(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask;
+ *(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase;
+ *(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase;
+ *(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask;
+ *(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase;
+ mb();
+
+ *(vulp)T2_HAE_1 = srm_hae;
+ *(vulp)T2_HAE_2 = t2_saved_config.hae_2;
+ *(vulp)T2_HAE_3 = t2_saved_config.hae_3;
+ *(vulp)T2_HAE_4 = t2_saved_config.hae_4;
+ *(vulp)T2_HBASE = t2_saved_config.hbase;
+ mb();
+ *(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */
+}
+
+void
+t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ unsigned long t2_iocsr;
+
+ t2_iocsr = *(vulp)T2_IOCSR;
+
+ /* set the TLB Clear bit */
+ *(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28);
+ mb();
+ *(vulp)T2_IOCSR; /* read it back to make sure */
+
+ /* clear the TLB Clear bit */
+ *(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28);
+ mb();
+ *(vulp)T2_IOCSR; /* read it back to make sure */
+}
+
+#define SIC_SEIC (1UL << 33) /* System Event Clear */
+
+static void
+t2_clear_errors(int cpu)
+{
+ struct sable_cpu_csr *cpu_regs;
+
+ cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);
+
+ cpu_regs->sic &= ~SIC_SEIC;
+
+ /* Clear CPU errors. */
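+	/* (These are write-1-to-clear registers: OR-ing each with
+	   itself writes back every set bit, clearing it.) */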
+ cpu_regs->bcce |= cpu_regs->bcce;
+ cpu_regs->cbe |= cpu_regs->cbe;
+ cpu_regs->bcue |= cpu_regs->bcue;
+ cpu_regs->dter |= cpu_regs->dter;
+
+ *(vulp)T2_CERR1 |= *(vulp)T2_CERR1;
+ *(vulp)T2_PERR1 |= *(vulp)T2_PERR1;
+
+ mb();
+ mb(); /* magic */
+}
+
+/*
+ * SABLE seems to have a "broadcast" style machine check, in that all
+ * CPUs receive it. And, the issuing CPU, in the case of PCI Config
+ * space read/write faults, will also receive a second mcheck, upon
+ * lowering IPL during completion processing in pci_read_config_byte()
+ * et al.
+ *
+ * Hence all the taken/expected/any_expected/last_taken stuff...
+ */
+void
+t2_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ int cpu = smp_processor_id();
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+#endif
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ t2_clear_errors(cpu);
+
+ /* This should not actually be done until the logout frame is
+ examined, but, since we don't do that, go on and do this... */
+ wrmces(0x7);
+ mb();
+
+ /* Now, do testing for the anomalous conditions. */
+ if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
+ /*
+ * FUNKY: Received mcheck on a CPU and not
+ * expecting it, but another CPU is expecting one.
+ *
+ * Just dismiss it for now on this CPU...
+ */
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk("t2_machine_check(cpu%d): any_expected 0x%x -"
+ " (assumed) spurious -"
+ " code 0x%x\n", cpu, t2_mcheck_any_expected,
+ (unsigned int)mchk_header->code);
+ }
+#endif
+ return;
+ }
+
+ if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
+ if (t2_mcheck_last_taken & (1 << cpu)) {
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk("t2_machine_check(cpu%d): last_taken 0x%x - "
+ "unexpected mcheck - code 0x%x\n",
+ cpu, t2_mcheck_last_taken,
+ (unsigned int)mchk_header->code);
+ }
+#endif
+ t2_mcheck_last_taken = 0;
+ mb();
+ return;
+ } else {
+ t2_mcheck_last_taken = 0;
+ mb();
+ }
+ }
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk("%s t2_mcheck(cpu%d): last_taken 0x%x - "
+ "any_expected 0x%x - code 0x%x\n",
+ (mcheck_expected(cpu) ? "EX" : "UN"), cpu,
+ t2_mcheck_last_taken, t2_mcheck_any_expected,
+ (unsigned int)mchk_header->code);
+ }
+#endif
+
+ process_mcheck_info(vector, la_ptr, regs, "T2", mcheck_expected(cpu));
+}
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
new file mode 100644
index 000000000000..3662fef7db9a
--- /dev/null
+++ b/arch/alpha/kernel/core_titan.c
@@ -0,0 +1,806 @@
+/*
+ * linux/arch/alpha/kernel/core_titan.c
+ *
+ * Code common to all TITAN core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_titan.h>
+#undef __EXTERN_INLINE
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/* Save Titan configuration data as the console had it set up. */
+
+struct
+{
+ unsigned long wsba[4];
+ unsigned long wsm[4];
+ unsigned long tba[4];
+} saved_config[4] __attribute__((common));
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Routines to access TIG registers.
+ */
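+/* TIG registers appear to be byte-wide and spaced 64 bytes apart in
+   TIG space, hence the (offset << 6) below; only the low byte of
+   each quadword is significant. */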
+static inline volatile unsigned long *
+mk_tig_addr(int offset)
+{
+ return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
+}
+
+static inline u8
+titan_read_tig(int offset, u8 value)
+{
+ volatile unsigned long *tig_addr = mk_tig_addr(offset);
+ return (u8)(*tig_addr & 0xff);
+}
+
+static inline void
+titan_write_tig(int offset, u8 value)
+{
+ volatile unsigned long *tig_addr = mk_tig_addr(offset);
+ *tig_addr = (unsigned long)value;
+}
+
+
+/*
+ * Given a bus, device, and function number, compute the resulting
+ * configuration space address.  (Unlike chips that must set up an
+ * HAE register per access, no chip state is modified here, so these
+ * routines need no extra serialization.)
+ *
+ * Note that all config space accesses use Type 1 address format.
+ *
+ * Note also that type 1 is determined by non-zero bus number.
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ *	23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
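+
+/*
+ * For example, a config access to bus 1, device 4, function 0,
+ * register 0x04 (illustrative values) behind this hose computes:
+ *
+ *	addr = (1 << 16) | ((4 << 3) << 8) | 0x04 = 0x12004,
+ *
+ * ORed with the hose's config_space_base.
+ */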
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops titan_pci_ops =
+{
+ .read = titan_read_config,
+ .write = titan_write_config,
+};
+
+
+void
+titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ titan_pachip *pachip =
+ (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
+ titan_pachip_port *port;
+ volatile unsigned long *csr;
+ unsigned long value;
+
+ /* Get the right hose. */
+ port = &pachip->g_port;
+ if (hose->index & 2)
+ port = &pachip->a_port;
+
+ /* We can invalidate up to 8 tlb entries in a go. The flush
+ matches against <31:16> in the pci address.
+ Note that gtlbi* and atlbi* are in the same place in the g_port
+ and a_port, respectively, so the g_port offset can be used
+ even if hose is an a_port */
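+	/* Note: callers pass (start=0, end=~0) when they want the
+	   flush-all (TBIA) path, since those differ in <31:16>. */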
+ csr = &port->port_specific.g.gtlbia.csr;
+ if (((start ^ end) & 0xffff0000) == 0)
+ csr = &port->port_specific.g.gtlbiv.csr;
+
+ /* For TBIA, it doesn't matter what value we write. For TBI,
+ it's the shifted tag bits. */
+ value = (start & 0xffff0000) >> 12;
+
+ wmb();
+ *csr = value;
+ mb();
+ *csr;
+}
+
+static int
+titan_query_agp(titan_pachip_port *port)
+{
+ union TPAchipPCTL pctl;
+
+ /* set up APCTL */
+ pctl.pctl_q_whole = port->pctl.csr;
+
+	return pctl.pctl_r_bits.apctl_v_agp_present;
+}
+
+static void __init
+titan_init_one_pachip_port(titan_pachip_port *port, int index)
+{
+ struct pci_controller *hose;
+
+ hose = alloc_pci_controller();
+ if (index == 0)
+ pci_isa_hose = hose;
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /*
+ * This is for userland consumption. The 40-bit PIO bias that we
+ * use in the kernel through KSEG doesn't work in the page table
+ * based user mappings. (43-bit KSEG sign extends the physical
+ * address from bit 40 to hit the I/O bit - mapped addresses don't).
+ * So make sure we get the 43-bit PIO bias.
+ */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base
+ = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
+ hose->dense_io_base
+ = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;
+
+ hose->config_space_base = TITAN_CONF(index);
+ hose->index = index;
+
+ hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
+ hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
+ hose->io_space->name = pci_io_names[index];
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
+ hose->mem_space->end = hose->mem_space->start + 0xffffffff;
+ hose->mem_space->name = pci_mem_names[index];
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n", index);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
+
+ /*
+ * Save the existing PCI window translations. SRM will
+ * need them when we go to reboot.
+ */
+ saved_config[index].wsba[0] = port->wsba[0].csr;
+ saved_config[index].wsm[0] = port->wsm[0].csr;
+ saved_config[index].tba[0] = port->tba[0].csr;
+
+ saved_config[index].wsba[1] = port->wsba[1].csr;
+ saved_config[index].wsm[1] = port->wsm[1].csr;
+ saved_config[index].tba[1] = port->tba[1].csr;
+
+ saved_config[index].wsba[2] = port->wsba[2].csr;
+ saved_config[index].wsm[2] = port->wsm[2].csr;
+ saved_config[index].tba[2] = port->tba[2].csr;
+
+ saved_config[index].wsba[3] = port->wsba[3].csr;
+ saved_config[index].wsm[3] = port->wsm[3].csr;
+ saved_config[index].tba[3] = port->tba[3].csr;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Note: Window 3 on Titan is Scatter-Gather ONLY.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is direct access 1GB at 2GB
+ * Window 2 is scatter-gather 1GB at 3GB
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa->align_entry = 8; /* 64KB for ISA */
+
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */
+
+ port->wsba[0].csr = hose->sg_isa->dma_base | 3;
+ port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
+ port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);
+
+ port->wsba[1].csr = __direct_map_base | 1;
+ port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000;
+ port->tba[1].csr = 0;
+
+ port->wsba[2].csr = hose->sg_pci->dma_base | 3;
+ port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000;
+ port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);
+
+ port->wsba[3].csr = 0;
+
+ /* Enable the Monster Window to make DAC pci64 possible. */
+ port->pctl.csr |= pctl_m_mwin;
+
+ /*
+ * If it's an AGP port, initialize agplastwr.
+ */
+ if (titan_query_agp(port))
+ port->port_specific.a.agplastwr.csr = __direct_map_base;
+
+ titan_pci_tbi(hose, 0, -1);
+}
+
+static void __init
+titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
+{
+ int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;
+
+ /* Init the ports in hose order... */
+ titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */
+ if (pchip1_present)
+ titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
+ titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */
+ if (pchip1_present)
+ titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
+}
+
+static void __init
+titan_init_vga_hose(void)
+{
+#ifdef CONFIG_VGA_HOSE
+ u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
+
+ if (pu64[7] == 3) { /* TERM_TYPE == graphics */
+ struct pci_controller *hose;
+ int h = (pu64[30] >> 24) & 0xff; /* console hose # */
+
+ /*
+ * Our hose numbering matches the console's, so just find
+ * the right one...
+ */
+ for (hose = hose_head; hose; hose = hose->next) {
+ if (hose->index == h) break;
+ }
+
+ if (hose) {
+ printk("Console graphics on hose %d\n", hose->index);
+ pci_vga_hose = hose;
+ }
+ }
+#endif /* CONFIG_VGA_HOSE */
+}
+
+void __init
+titan_init_arch(void)
+{
+#if 0
+ printk("%s: titan_init_arch()\n", __FUNCTION__);
+ printk("%s: CChip registers:\n", __FUNCTION__);
+ printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
+ printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
+ printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
+ printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
+ printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
+ printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
+ printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
+ printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);
+
+ printk("%s: DChip registers:\n", __FUNCTION__);
+ printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
+ printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
+ printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
+#endif
+
+ boot_cpuid = __hard_smp_processor_id();
+
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* PCI DMA Direct Mapping is 1GB at 2GB. */
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x40000000;
+
+ /* Init the PA chip(s). */
+ titan_init_pachips(TITAN_pachip0, TITAN_pachip1);
+
+ /* Check for graphic console location (if any). */
+ titan_init_vga_hose();
+}
+
+static void
+titan_kill_one_pachip_port(titan_pachip_port *port, int index)
+{
+ port->wsba[0].csr = saved_config[index].wsba[0];
+ port->wsm[0].csr = saved_config[index].wsm[0];
+ port->tba[0].csr = saved_config[index].tba[0];
+
+ port->wsba[1].csr = saved_config[index].wsba[1];
+ port->wsm[1].csr = saved_config[index].wsm[1];
+ port->tba[1].csr = saved_config[index].tba[1];
+
+ port->wsba[2].csr = saved_config[index].wsba[2];
+ port->wsm[2].csr = saved_config[index].wsm[2];
+ port->tba[2].csr = saved_config[index].tba[2];
+
+ port->wsba[3].csr = saved_config[index].wsba[3];
+ port->wsm[3].csr = saved_config[index].wsm[3];
+ port->tba[3].csr = saved_config[index].tba[3];
+}
+
+static void
+titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
+{
+ int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;
+
+ if (pchip1_present) {
+ titan_kill_one_pachip_port(&pachip1->g_port, 1);
+ titan_kill_one_pachip_port(&pachip1->a_port, 3);
+ }
+ titan_kill_one_pachip_port(&pachip0->g_port, 0);
+ titan_kill_one_pachip_port(&pachip0->a_port, 2);
+}
+
+void
+titan_kill_arch(int mode)
+{
+ titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
+}
+
+
+/*
+ * IO map support.
+ */
+
+void __iomem *
+titan_ioremap(unsigned long addr, unsigned long size)
+{
+ int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
+ unsigned long baddr = addr & ~TITAN_HOSE_MASK;
+ unsigned long last = baddr + size - 1;
+ struct pci_controller *hose;
+ struct vm_struct *area;
+ unsigned long vaddr;
+ unsigned long *ptes;
+ unsigned long pfn;
+
+ /*
+ * Adjust the addr.
+ */
+#ifdef CONFIG_VGA_HOSE
+ if (pci_vga_hose && __titan_is_mem_vga(addr)) {
+ h = pci_vga_hose->index;
+ addr += pci_vga_hose->mem_space->start;
+ }
+#endif
+
+ /*
+ * Find the hose.
+ */
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == h)
+ break;
+ if (!hose)
+ return NULL;
+
+ /*
+ * Is it direct-mapped?
+ */
+ if ((baddr >= __direct_map_base) &&
+ ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
+ vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
+ return (void __iomem *) vaddr;
+ }
+
+ /*
+ * Check the scatter-gather arena.
+ */
+ if (hose->sg_pci &&
+ baddr >= (unsigned long)hose->sg_pci->dma_base &&
+ last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){
+
+ /*
+ * Adjust the limits (mappings must be page aligned)
+ */
+ baddr -= hose->sg_pci->dma_base;
+ last -= hose->sg_pci->dma_base;
+ baddr &= PAGE_MASK;
+ size = PAGE_ALIGN(last) - baddr;
+
+ /*
+ * Map it
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ ptes = hose->sg_pci->ptes;
+ for (vaddr = (unsigned long)area->addr;
+ baddr <= last;
+ baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
+ pfn = ptes[baddr >> PAGE_SHIFT];
+ if (!(pfn & 1)) {
+ printk("ioremap failed... pte not valid...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ pfn >>= 1; /* make it a true pfn */
+
+ if (__alpha_remap_area_pages(vaddr,
+ pfn << PAGE_SHIFT,
+ PAGE_SIZE, 0)) {
+ printk("FAILED to map...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ }
+
+ flush_tlb_all();
+
+ vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
+ return (void __iomem *) vaddr;
+ }
+
+ return NULL;
+}
+
+void
+titan_iounmap(volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (addr >= VMALLOC_START)
+ vfree((void *)(PAGE_MASK & addr));
+}
+
+int
+titan_is_mmio(const volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+
+ if (addr >= VMALLOC_START)
+ return 1;
+ else
+ return (addr & 0x100000000UL) == 0;
+}
+
+#ifndef CONFIG_ALPHA_GENERIC
+EXPORT_SYMBOL(titan_ioremap);
+EXPORT_SYMBOL(titan_iounmap);
+EXPORT_SYMBOL(titan_is_mmio);
+#endif
+
+/*
+ * AGP GART Support.
+ */
+#include <linux/agp_backend.h>
+#include <asm/agp_backend.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+struct titan_agp_aperture {
+ struct pci_iommu_arena *arena;
+ long pg_start;
+ long pg_count;
+};
+
+static int
+titan_agp_setup(alpha_agp_info *agp)
+{
+ struct titan_agp_aperture *aper;
+
+ if (!alpha_agpgart_size)
+ return -ENOMEM;
+
+ aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
+ if (aper == NULL)
+ return -ENOMEM;
+
+ aper->arena = agp->hose->sg_pci;
+ aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
+ aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
+ aper->pg_count - 1);
+ if (aper->pg_start < 0) {
+ printk(KERN_ERR "Failed to reserve AGP memory\n");
+ kfree(aper);
+ return -ENOMEM;
+ }
+
+ agp->aperture.bus_base =
+ aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
+ agp->aperture.size = aper->pg_count * PAGE_SIZE;
+ agp->aperture.sysdata = aper;
+
+ return 0;
+}
+
+static void
+titan_agp_cleanup(alpha_agp_info *agp)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ int status;
+
+ status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
+ if (status == -EBUSY) {
+ printk(KERN_WARNING
+ "Attempted to release bound AGP memory - unbinding\n");
+ iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
+ status = iommu_release(aper->arena, aper->pg_start,
+ aper->pg_count);
+ }
+ if (status < 0)
+ printk(KERN_ERR "Failed to release AGP memory\n");
+
+ kfree(aper);
+ kfree(agp);
+}
+
+static int
+titan_agp_configure(alpha_agp_info *agp)
+{
+ union TPAchipPCTL pctl;
+ titan_pachip_port *port = agp->private;
+ pctl.pctl_q_whole = port->pctl.csr;
+
+ /* Side-Band Addressing? */
+ pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;
+
+ /* AGP Rate? */
+ pctl.pctl_r_bits.apctl_v_agp_rate = 0; /* 1x */
+ if (agp->mode.bits.rate & 2)
+ pctl.pctl_r_bits.apctl_v_agp_rate = 1; /* 2x */
+#if 0
+ if (agp->mode.bits.rate & 4)
+ pctl.pctl_r_bits.apctl_v_agp_rate = 2; /* 4x */
+#endif
+
+ /* RQ Depth? */
+ pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
+ pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;
+
+ /*
+ * AGP Enable.
+ */
+ pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;
+
+ /* Tell the user. */
+ printk("Enabling AGP: %dX%s\n",
+ 1 << pctl.pctl_r_bits.apctl_v_agp_rate,
+ pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");
+
+ /* Write it. */
+ port->pctl.csr = pctl.pctl_q_whole;
+
+ /* And wait at least 5000 66MHz cycles (per Titan spec). */
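+	/* (5000 cycles at 66MHz is roughly 76us, so 100us suffices.) */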
+ udelay(100);
+
+ return 0;
+}
+
+static int
+titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_bind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count, mem->memory);
+}
+
+static int
+titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_unbind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count);
+}
+
+static unsigned long
+titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ unsigned long baddr = addr - aper->arena->dma_base;
+ unsigned long pte;
+
+ if (addr < agp->aperture.bus_base ||
+ addr >= agp->aperture.bus_base + agp->aperture.size) {
+ printk("%s: addr out of range\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
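+	/* PTE format (as in titan_ioremap above): valid bit in <0>,
+	   PFN in the bits above it. */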
+ pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
+ if (!(pte & 1)) {
+ printk("%s: pte not valid\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ return (pte >> 1) << PAGE_SHIFT;
+}
+
+struct alpha_agp_ops titan_agp_ops =
+{
+ .setup = titan_agp_setup,
+ .cleanup = titan_agp_cleanup,
+ .configure = titan_agp_configure,
+ .bind = titan_agp_bind_memory,
+ .unbind = titan_agp_unbind_memory,
+ .translate = titan_agp_translate
+};
+
+alpha_agp_info *
+titan_agp_info(void)
+{
+ alpha_agp_info *agp;
+ struct pci_controller *hose;
+ titan_pachip_port *port;
+ int hosenum = -1;
+ union TPAchipPCTL pctl;
+
+ /*
+ * Find the AGP port.
+ */
+ port = &TITAN_pachip0->a_port;
+ if (titan_query_agp(port))
+ hosenum = 2;
+ if (hosenum < 0 &&
+ titan_query_agp(port = &TITAN_pachip1->a_port))
+ hosenum = 3;
+
+ /*
+ * Find the hose the port is on.
+ */
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == hosenum)
+ break;
+
+ if (!hose || !hose->sg_pci)
+ return NULL;
+
+ /*
+ * Allocate the info structure.
+ */
+	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
+	if (!agp)
+		return NULL;
+
+ /*
+ * Fill it in.
+ */
+ agp->hose = hose;
+ agp->private = port;
+ agp->ops = &titan_agp_ops;
+
+ /*
+ * Aperture - not configured until ops.setup().
+ *
+ * FIXME - should we go ahead and allocate it here?
+ */
+ agp->aperture.bus_base = 0;
+ agp->aperture.size = 0;
+ agp->aperture.sysdata = NULL;
+
+ /*
+ * Capabilities.
+ */
+ agp->capability.lw = 0;
+ agp->capability.bits.rate = 3; /* 2x, 1x */
+ agp->capability.bits.sba = 1;
+ agp->capability.bits.rq = 7; /* 8 - 1 */
+
+ /*
+ * Mode.
+ */
+ pctl.pctl_q_whole = port->pctl.csr;
+ agp->mode.lw = 0;
+ agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
+ agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
+ agp->mode.bits.rq = 7; /* RQ Depth? */
+ agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;
+
+ return agp;
+}
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
new file mode 100644
index 000000000000..8aa305bd6a2c
--- /dev/null
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -0,0 +1,459 @@
+/*
+ * linux/arch/alpha/kernel/core_tsunami.c
+ *
+ * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * Code common to all TSUNAMI core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_tsunami.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/* Save Tsunami configuration data as the console had it set up. */
+
+struct
+{
+ unsigned long wsba[4];
+ unsigned long wsm[4];
+ unsigned long tba[4];
+} saved_config[2] __attribute__((common));
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the I/O controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Given a bus, device, and function number, compute the resulting
+ * configuration space address.  (Unlike chips that must set up an
+ * HAE register per access, no chip state is modified here, so these
+ * routines need no extra serialization.)
+ *
+ * Note that all config space accesses use Type 1 address format.
+ *
+ * Note also that type 1 is determined by non-zero bus number.
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ *	23:16	bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+tsunami_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+tsunami_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops tsunami_pci_ops =
+{
+ .read = tsunami_read_config,
+ .write = tsunami_write_config,
+};
+
+void
+tsunami_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0;
+ volatile unsigned long *csr;
+ unsigned long value;
+
+ /* We can invalidate up to 8 tlb entries in a go. The flush
+ matches against <31:16> in the pci address. */
+ csr = &pchip->tlbia.csr;
+ if (((start ^ end) & 0xffff0000) == 0)
+ csr = &pchip->tlbiv.csr;
+
+ /* For TBIA, it doesn't matter what value we write. For TBI,
+ it's the shifted tag bits. */
+ value = (start & 0xffff0000) >> 12;
+
+ *csr = value;
+ mb();
+ *csr;
+}
+
+#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
+static long __init
+tsunami_probe_read(volatile unsigned long *vaddr)
+{
+ long dont_care, probe_result;
+ int cpu = smp_processor_id();
+ int s = swpipl(IPL_MCHECK - 1);
+
+ mcheck_taken(cpu) = 0;
+ mcheck_expected(cpu) = 1;
+ mb();
+ dont_care = *vaddr;
+ draina();
+ mcheck_expected(cpu) = 0;
+ probe_result = !mcheck_taken(cpu);
+ mcheck_taken(cpu) = 0;
+ setipl(s);
+
+ printk("dont_care == 0x%lx\n", dont_care);
+
+ return probe_result;
+}
+
+static long __init
+tsunami_probe_write(volatile unsigned long *vaddr)
+{
+ long true_contents, probe_result = 1;
+
+ TSUNAMI_cchip->misc.csr |= (1L << 28); /* clear NXM... */
+ true_contents = *vaddr;
+ *vaddr = 0;
+ draina();
+ if (TSUNAMI_cchip->misc.csr & (1L << 28)) {
+ int source = (TSUNAMI_cchip->misc.csr >> 29) & 7;
+ TSUNAMI_cchip->misc.csr |= (1L << 28); /* ...and unlock NXS. */
+ probe_result = 0;
+ printk("tsunami_probe_write: unit %d at 0x%016lx\n", source,
+ (unsigned long)vaddr);
+ }
+ if (probe_result)
+ *vaddr = true_contents;
+ return probe_result;
+}
+#else
+#define tsunami_probe_read(ADDR) 1
+#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
+
+#define FN __FUNCTION__
+
+static void __init
+tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
+{
+ struct pci_controller *hose;
+
+ if (tsunami_probe_read(&pchip->pctl.csr) == 0)
+ return;
+
+ hose = alloc_pci_controller();
+ if (index == 0)
+ pci_isa_hose = hose;
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /* This is for userland consumption. For some reason, the 40-bit
+ PIO bias that we use in the kernel through KSEG didn't work for
+ the page table based user mappings. So make sure we get the
+ 43-bit PIO bias. */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base
+ = (TSUNAMI_MEM(index) & 0xffffffffffL) | 0x80000000000L;
+ hose->dense_io_base
+ = (TSUNAMI_IO(index) & 0xffffffffffL) | 0x80000000000L;
+
+ hose->config_space_base = TSUNAMI_CONF(index);
+ hose->index = index;
+
+ hose->io_space->start = TSUNAMI_IO(index) - TSUNAMI_IO_BIAS;
+ hose->io_space->end = hose->io_space->start + TSUNAMI_IO_SPACE - 1;
+ hose->io_space->name = pci_io_names[index];
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = TSUNAMI_MEM(index) - TSUNAMI_MEM_BIAS;
+ hose->mem_space->end = hose->mem_space->start + 0xffffffff;
+ hose->mem_space->name = pci_mem_names[index];
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n", index);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
+
+ /*
+ * Save the existing PCI window translations. SRM will
+ * need them when we go to reboot.
+ */
+
+ saved_config[index].wsba[0] = pchip->wsba[0].csr;
+ saved_config[index].wsm[0] = pchip->wsm[0].csr;
+ saved_config[index].tba[0] = pchip->tba[0].csr;
+
+ saved_config[index].wsba[1] = pchip->wsba[1].csr;
+ saved_config[index].wsm[1] = pchip->wsm[1].csr;
+ saved_config[index].tba[1] = pchip->tba[1].csr;
+
+ saved_config[index].wsba[2] = pchip->wsba[2].csr;
+ saved_config[index].wsm[2] = pchip->wsm[2].csr;
+ saved_config[index].tba[2] = pchip->tba[2].csr;
+
+ saved_config[index].wsba[3] = pchip->wsba[3].csr;
+ saved_config[index].wsm[3] = pchip->wsm[3].csr;
+ saved_config[index].tba[3] = pchip->tba[3].csr;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Note: Window 3 is scatter-gather only
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather (up to) 1GB at 1GB
+ * Window 2 is direct access 2GB at 2GB
+ *
+ * NOTE: we need the align_entry settings for Acer devices on ES40,
+ * specifically floppy and IDE when memory is larger than 2GB.
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ /* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */
+ hose->sg_isa->align_entry = 4;
+
+ hose->sg_pci = iommu_arena_new(hose, 0x40000000,
+ size_for_memory(0x40000000), 0);
+ hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */
+
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
+ pchip->wsba[0].csr = hose->sg_isa->dma_base | 3;
+ pchip->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
+ pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);
+
+ pchip->wsba[1].csr = hose->sg_pci->dma_base | 3;
+ pchip->wsm[1].csr = (hose->sg_pci->size - 1) & 0xfff00000;
+ pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes);
+
+ pchip->wsba[2].csr = 0x80000000 | 1;
+ pchip->wsm[2].csr = (0x80000000 - 1) & 0xfff00000;
+ pchip->tba[2].csr = 0;
+
+ pchip->wsba[3].csr = 0;
+
+ /* Enable the Monster Window to make DAC pci64 possible. */
+ pchip->pctl.csr |= pctl_m_mwin;
+
+ tsunami_pci_tbi(hose, 0, -1);
+}
+
+void __init
+tsunami_init_arch(void)
+{
+#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
+	unsigned long bogus_addr;
+
+ /* Ho hum.. init_arch is called before init_IRQ, but we need to be
+ able to handle machine checks. So install the handler now. */
+ wrent(entInt, 0);
+
+ /* NXMs just don't matter to Tsunami--unless they make it
+ choke completely. */
+	bogus_addr = (unsigned long)(TSUNAMI_cchip - 1);
+	printk("%s: probing bogus address: 0x%016lx\n", FN, bogus_addr);
+ printk("\tprobe %s\n",
+ tsunami_probe_write((unsigned long *)bogus_addr)
+ ? "succeeded" : "failed");
+#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
+
+#if 0
+ printk("%s: CChip registers:\n", FN);
+ printk("%s: CSR_CSC 0x%lx\n", FN, TSUNAMI_cchip->csc.csr);
+ printk("%s: CSR_MTR 0x%lx\n", FN, TSUNAMI_cchip.mtr.csr);
+ printk("%s: CSR_MISC 0x%lx\n", FN, TSUNAMI_cchip->misc.csr);
+ printk("%s: CSR_DIM0 0x%lx\n", FN, TSUNAMI_cchip->dim0.csr);
+ printk("%s: CSR_DIM1 0x%lx\n", FN, TSUNAMI_cchip->dim1.csr);
+ printk("%s: CSR_DIR0 0x%lx\n", FN, TSUNAMI_cchip->dir0.csr);
+ printk("%s: CSR_DIR1 0x%lx\n", FN, TSUNAMI_cchip->dir1.csr);
+ printk("%s: CSR_DRIR 0x%lx\n", FN, TSUNAMI_cchip->drir.csr);
+
+ printk("%s: DChip registers:\n");
+ printk("%s: CSR_DSC 0x%lx\n", FN, TSUNAMI_dchip->dsc.csr);
+ printk("%s: CSR_STR 0x%lx\n", FN, TSUNAMI_dchip->str.csr);
+ printk("%s: CSR_DREV 0x%lx\n", FN, TSUNAMI_dchip->drev.csr);
+#endif
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* Find how many hoses we have, and initialize them. TSUNAMI
+ and TYPHOON can have 2, but might only have 1 (DS10). */
+
+ tsunami_init_one_pchip(TSUNAMI_pchip0, 0);
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_init_one_pchip(TSUNAMI_pchip1, 1);
+}
+
+static void
+tsunami_kill_one_pchip(tsunami_pchip *pchip, int index)
+{
+ pchip->wsba[0].csr = saved_config[index].wsba[0];
+ pchip->wsm[0].csr = saved_config[index].wsm[0];
+ pchip->tba[0].csr = saved_config[index].tba[0];
+
+ pchip->wsba[1].csr = saved_config[index].wsba[1];
+ pchip->wsm[1].csr = saved_config[index].wsm[1];
+ pchip->tba[1].csr = saved_config[index].tba[1];
+
+ pchip->wsba[2].csr = saved_config[index].wsba[2];
+ pchip->wsm[2].csr = saved_config[index].wsm[2];
+ pchip->tba[2].csr = saved_config[index].tba[2];
+
+ pchip->wsba[3].csr = saved_config[index].wsba[3];
+ pchip->wsm[3].csr = saved_config[index].wsm[3];
+ pchip->tba[3].csr = saved_config[index].tba[3];
+}
+
+void
+tsunami_kill_arch(int mode)
+{
+ tsunami_kill_one_pchip(TSUNAMI_pchip0, 0);
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_kill_one_pchip(TSUNAMI_pchip1, 1);
+}
+
+static inline void
+tsunami_pci_clr_err_1(tsunami_pchip *pchip)
+{
+ pchip->perror.csr;
+ pchip->perror.csr = 0x040;
+ mb();
+ pchip->perror.csr;
+}
+
+static inline void
+tsunami_pci_clr_err(void)
+{
+ tsunami_pci_clr_err_1(TSUNAMI_pchip0);
+
+ /* TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10) */
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_pci_clr_err_1(TSUNAMI_pchip1);
+}
+
+void
+tsunami_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ /* Clear error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ tsunami_pci_clr_err();
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "TSUNAMI",
+ mcheck_expected(smp_processor_id()));
+}
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
new file mode 100644
index 000000000000..2b767a1bad96
--- /dev/null
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -0,0 +1,658 @@
+/*
+ * linux/arch/alpha/kernel/core_wildfire.c
+ *
+ * Wildfire support.
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_wildfire.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+#define DEBUG_CONFIG 0
+#define DEBUG_DUMP_REGS 0
+#define DEBUG_DUMP_CONFIG 1
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+#if DEBUG_DUMP_REGS
+static void wildfire_dump_pci_regs(int qbbno, int hoseno);
+static void wildfire_dump_pca_regs(int qbbno, int pcano);
+static void wildfire_dump_qsa_regs(int qbbno);
+static void wildfire_dump_qsd_regs(int qbbno);
+static void wildfire_dump_iop_regs(int qbbno);
+static void wildfire_dump_gp_regs(int qbbno);
+#endif
+#if DEBUG_DUMP_CONFIG
+static void wildfire_dump_hardware_config(void);
+#endif
+
+unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
+unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
+#define QBB_MAP_EMPTY 0xff
+
+unsigned long wildfire_hard_qbb_mask;
+unsigned long wildfire_soft_qbb_mask;
+unsigned long wildfire_gp_mask;
+unsigned long wildfire_hs_mask;
+unsigned long wildfire_iop_mask;
+unsigned long wildfire_ior_mask;
+unsigned long wildfire_pca_mask;
+unsigned long wildfire_cpu_mask;
+unsigned long wildfire_mem_mask;
+
+void __init
+wildfire_init_hose(int qbbno, int hoseno)
+{
+ struct pci_controller *hose;
+ wildfire_pci *pci;
+
+ hose = alloc_pci_controller();
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /* This is for userland consumption. */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
+ hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);
+
+ hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
+ hose->index = (qbbno << 3) + hoseno;
+
+ hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
+ hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
+ hose->io_space->name = pci_io_names[hoseno];
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS;
+ hose->mem_space->end = hose->mem_space->start + 0xffffffff;
+ hose->mem_space->name = pci_mem_names[hoseno];
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
+ qbbno, hoseno);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
+ qbbno, hoseno);
+
+#if DEBUG_DUMP_REGS
+ wildfire_dump_pci_regs(qbbno, hoseno);
+#endif
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Note: Window 3 is scatter-gather only
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is direct access 1GB at 1GB
+ * Window 2 is direct access 1GB at 2GB
+ * Window 3 is scatter-gather 128MB at 3GB
+ * ??? We ought to scale window 3 memory.
+ *
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);
+
+ pci = WILDFIRE_pci(qbbno, hoseno);
+
+ pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
+ pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
+ pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);
+
+ pci->pci_window[1].wbase.csr = 0x40000000 | 1;
+ pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
+ pci->pci_window[1].tbase.csr = 0;
+
+ pci->pci_window[2].wbase.csr = 0x80000000 | 1;
+ pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
+ pci->pci_window[2].tbase.csr = 0x40000000;
+
+ pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
+ pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
+ pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);
+
+ wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
+}
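+
+/*
+ * Illustrative sketch (editorial, not part of the original change):
+ * windows 1 and 2 above jointly give devices a single 2GB direct map
+ * at a 1GB bias -- bus 0x40000000-0xbfffffff reaches physical
+ * 0x0-0x7fffffff -- matching the __direct_map_base/__direct_map_size
+ * values set in wildfire_init_arch(). The helper name below is
+ * hypothetical; the hardware does this routing itself.
+ */
+static inline unsigned long
+wildfire_direct_window_example(unsigned long bus_addr)
+{
+ /* Window 1: wbase 0x40000000, tbase 0x00000000. */
+ if (bus_addr >= 0x40000000UL && bus_addr < 0x80000000UL)
+ return bus_addr - 0x40000000UL;
+ /* Window 2: wbase 0x80000000, tbase 0x40000000. */
+ if (bus_addr >= 0x80000000UL && bus_addr < 0xc0000000UL)
+ return bus_addr - 0x80000000UL + 0x40000000UL;
+ /* Windows 0 and 3 are scatter-gather: translated via arena PTEs. */
+ return ~0UL;
+}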
+
+void __init
+wildfire_init_pca(int qbbno, int pcano)
+{
+ /* Test for PCA existence first. */
+ if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
+ return;
+
+#if DEBUG_DUMP_REGS
+ wildfire_dump_pca_regs(qbbno, pcano);
+#endif
+
+ /* Do both hoses of the PCA. */
+ wildfire_init_hose(qbbno, (pcano << 1) + 0);
+ wildfire_init_hose(qbbno, (pcano << 1) + 1);
+}
+
+void __init
+wildfire_init_qbb(int qbbno)
+{
+ int pcano;
+
+ /* Test for QBB existence first. */
+ if (!WILDFIRE_QBB_EXISTS(qbbno))
+ return;
+
+#if DEBUG_DUMP_REGS
+ wildfire_dump_qsa_regs(qbbno);
+ wildfire_dump_qsd_regs(qbbno);
+ wildfire_dump_iop_regs(qbbno);
+ wildfire_dump_gp_regs(qbbno);
+#endif
+
+ /* Init all PCAs here. */
+ for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
+ wildfire_init_pca(qbbno, pcano);
+ }
+}
+
+void __init
+wildfire_hardware_probe(void)
+{
+ unsigned long temp;
+ unsigned int hard_qbb, soft_qbb;
+ wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
+ wildfire_qsd *qsd;
+ wildfire_qsa *qsa;
+ wildfire_iop *iop;
+ wildfire_gp *gp;
+ wildfire_ne *ne;
+ wildfire_fe *fe;
+ int i;
+
+ temp = fast->qsd_whami.csr;
+#if 0
+ printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
+#endif
+
+ hard_qbb = (temp >> 8) & 7;
+ soft_qbb = (temp >> 4) & 7;
+
+ /* Init the HW configuration variables. */
+ wildfire_hard_qbb_mask = (1 << hard_qbb);
+ wildfire_soft_qbb_mask = (1 << soft_qbb);
+
+ wildfire_gp_mask = 0;
+ wildfire_hs_mask = 0;
+ wildfire_iop_mask = 0;
+ wildfire_ior_mask = 0;
+ wildfire_pca_mask = 0;
+
+ wildfire_cpu_mask = 0;
+ wildfire_mem_mask = 0;
+
+ memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
+ memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
+
+ /* First, determine which QBBs are present. */
+ qsa = WILDFIRE_qsa(soft_qbb);
+
+ temp = qsa->qsa_qbb_id.csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
+#endif
+
+ if (temp & 0x40) /* Is there an HS? */
+ wildfire_hs_mask = 1;
+
+ if (temp & 0x20) { /* Is there a GP? */
+ gp = WILDFIRE_gp(soft_qbb);
+ temp = 0;
+ for (i = 0; i < 4; i++) {
+ temp |= gp->gpa_qbb_map[i].csr << (i * 8);
+#if 0
+ printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
+ i, gp, temp);
+#endif
+ }
+
+ for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
+ if (temp & 8) { /* Is there a QBB? */
+ soft_qbb = temp & 7;
+ wildfire_hard_qbb_mask |= (1 << hard_qbb);
+ wildfire_soft_qbb_mask |= (1 << soft_qbb);
+ }
+ temp >>= 4;
+ }
+ wildfire_gp_mask = wildfire_soft_qbb_mask;
+ }
+
+ /* Next, determine each QBB's resources. */
+ for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
+ if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
+ qsd = WILDFIRE_qsd(soft_qbb);
+ temp = qsd->qsd_whami.csr;
+#if 0
+ printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
+#endif
+ hard_qbb = (temp >> 8) & 7;
+ wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
+ wildfire_soft_qbb_map[soft_qbb] = hard_qbb;
+
+ qsa = WILDFIRE_qsa(soft_qbb);
+ temp = qsa->qsa_qbb_pop[0].csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
+#endif
+ wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
+ wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);
+
+ temp = qsa->qsa_qbb_pop[1].csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
+#endif
+ wildfire_iop_mask |= (1 << soft_qbb);
+ wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);
+
+ temp = qsa->qsa_qbb_id.csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
+#endif
+ if (temp & 0x20)
+ wildfire_gp_mask |= (1 << soft_qbb);
+
+ /* Probe for PCA existence here. */
+ for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
+ iop = WILDFIRE_iop(soft_qbb);
+ ne = WILDFIRE_ne(soft_qbb, i);
+ fe = WILDFIRE_fe(soft_qbb, i);
+
+ if ((iop->iop_hose[i].init.csr & 1) == 1 &&
+ ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
+ ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
+ {
+ wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
+ }
+ }
+
+ }
+ }
+#if DEBUG_DUMP_CONFIG
+ wildfire_dump_hardware_config();
+#endif
+}
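+
+/*
+ * Illustrative sketch (editorial, not part of the original change):
+ * the GPA_QBB_MAP walk above consumes one nibble per hard QBB --
+ * bit 3 marks the QBB present, bits 2:0 carry its soft number. A
+ * hypothetical decode helper; e.g. a map value of 0x98 yields
+ * hard QBB 0 -> soft 0 and hard QBB 1 -> soft 1.
+ */
+static inline int
+wildfire_qbb_map_example(unsigned long map, int hard_qbb, int *soft_qbb)
+{
+ unsigned long nibble = (map >> (hard_qbb * 4)) & 0xf;
+
+ if (!(nibble & 8))
+ return 0; /* no QBB at this hard number */
+ *soft_qbb = nibble & 7;
+ return 1;
+}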
+
+void __init
+wildfire_init_arch(void)
+{
+ int qbbno;
+
+ /* With multiple PCI buses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* Probe the hardware for info about configuration. */
+ wildfire_hardware_probe();
+
+ /* Now init all the found QBBs. */
+ for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
+ wildfire_init_qbb(qbbno);
+ }
+
+ /* Normal direct PCI DMA mapping. */
+ __direct_map_base = 0x40000000UL;
+ __direct_map_size = 0x80000000UL;
+}
+
+void
+wildfire_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ mb();
+ mb(); /* magic */
+ draina();
+ /* FIXME: clear pci errors */
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "WILDFIRE",
+ mcheck_expected(smp_processor_id()));
+}
+
+void
+wildfire_kill_arch(int mode)
+{
+}
+
+void
+wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ int qbbno = hose->index >> 3;
+ int hoseno = hose->index & 7;
+ wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
+
+ mb();
+ pci->pci_flush_tlb.csr; /* reading does the trick */
+}
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
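+
+/*
+ * Editorial note (not part of the original change): the address built
+ * above is the standard config-cycle encoding placed into this hose's
+ * config space. Worked example with hypothetical values -- bus 1,
+ * device 3, function 0 (devfn 0x18), register 0x10:
+ *
+ * (1 << 16) | (0x18 << 8) | 0x10 == 0x11810
+ *
+ * which is then offset by hose->config_space_base.
+ */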
+
+static int
+wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops wildfire_pci_ops =
+{
+ .read = wildfire_read_config,
+ .write = wildfire_write_config,
+};
+
+
+/*
+ * NUMA Support
+ */
+int wildfire_pa_to_nid(unsigned long pa)
+{
+ return pa >> 36;
+}
+
+int wildfire_cpuid_to_nid(int cpuid)
+{
+ /* assume 4 CPUs per node */
+ return cpuid >> 2;
+}
+
+unsigned long wildfire_node_mem_start(int nid)
+{
+ /* 64GB per node */
+ return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
+}
+
+unsigned long wildfire_node_mem_size(int nid)
+{
+ /* 64GB per node */
+ return 64UL * 1024 * 1024 * 1024;
+}
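+
+/*
+ * Editorial note (not part of the original change): the helpers above
+ * agree by construction -- 1UL << 36 is 64GB, so
+ * wildfire_pa_to_nid(wildfire_node_mem_start(nid)) == nid for any nid,
+ * and every pa inside a node's 64GB span maps back to that node.
+ */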
+
+#if DEBUG_DUMP_REGS
+
+static void __init
+wildfire_dump_pci_regs(int qbbno, int hoseno)
+{
+ wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
+ int i;
+
+ printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
+ qbbno, hoseno, pci);
+
+ printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
+ pci->pci_io_addr_ext.csr);
+ printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
+ printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
+ printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
+ printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
+ printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
+ printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);
+
+ printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
+ qbbno, hoseno, pci);
+ for (i = 0; i < 4; i++) {
+ printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
+ pci->pci_window[i].wbase.csr,
+ pci->pci_window[i].wmask.csr,
+ pci->pci_window[i].tbase.csr);
+ }
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_pca_regs(int qbbno, int pcano)
+{
+ wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
+ int i;
+
+ printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
+ qbbno, pcano, pca);
+
+ printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
+ printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
+ printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
+ printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
+ printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
+ pca->pca_stdio_edge_level.csr);
+
+ printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
+ qbbno, pcano, pca);
+ for (i = 0; i < 4; i++) {
+ printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
+ pca->pca_int[i].target.csr,
+ pca->pca_int[i].enable.csr);
+ }
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_qsa_regs(int qbbno)
+{
+ wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
+ int i;
+
+ printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);
+
+ printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
+ printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
+ printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);
+
+ for (i = 0; i < 5; i++)
+ printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
+ i, qsa->qsa_config[i].csr);
+
+ for (i = 0; i < 2; i++)
+ printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
+ i, qsa->qsa_qbb_pop[i].csr);
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_qsd_regs(int qbbno)
+{
+ wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);
+
+ printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);
+
+ printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
+ printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
+ printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
+ qsd->qsd_port_present.csr);
+ printk(KERN_ERR " QSD_PORT_ACTUVE: 0x%16lx\n",
+ qsd->qsd_port_active.csr);
+ printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
+ qsd->qsd_fault_ena.csr);
+ printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
+ qsd->qsd_cpu_int_ena.csr);
+ printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
+ qsd->qsd_mem_config.csr);
+ printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
+ qsd->qsd_err_sum.csr);
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_iop_regs(int qbbno)
+{
+ wildfire_iop *iop = WILDFIRE_iop(qbbno);
+ int i;
+
+ printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);
+
+ printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
+ printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
+ printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
+ iop->iop_switch_credits.csr);
+ printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
+ iop->iop_hose_credits.csr);
+
+ for (i = 0; i < 4; i++)
+ printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
+ i, iop->iop_hose[i].init.csr);
+ for (i = 0; i < 4; i++)
+ printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
+ i, iop->iop_dev_int[i].target.csr);
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_gp_regs(int qbbno)
+{
+ wildfire_gp *gp = WILDFIRE_gp(qbbno);
+ int i;
+
+ printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
+ for (i = 0; i < 4; i++)
+ printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
+ i, gp->gpa_qbb_map[i].csr);
+
+ printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
+ gp->gpa_mem_pop_map.csr);
+ printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
+ printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
+ printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
+ printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
+ printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);
+
+ printk(KERN_ERR "\n");
+}
+#endif /* DEBUG_DUMP_REGS */
+
+#if DEBUG_DUMP_CONFIG
+static void __init
+wildfire_dump_hardware_config(void)
+{
+ int i;
+
+ printk(KERN_ERR "Probed Hardware Configuration\n");
+
+ printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
+ printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);
+
+ printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
+ printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
+ printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
+ printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
+ printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);
+
+ printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
+ printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);
+
+ printk(" hard_qbb_map: ");
+ for (i = 0; i < WILDFIRE_MAX_QBB; i++)
+ if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
+ printk("--- ");
+ else
+ printk("%3d ", wildfire_hard_qbb_map[i]);
+ printk("\n");
+
+ printk(" soft_qbb_map: ");
+ for (i = 0; i < WILDFIRE_MAX_QBB; i++)
+ if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
+ printk("--- ");
+ else
+ printk("%3d ", wildfire_soft_qbb_map[i]);
+ printk("\n");
+}
+#endif /* DEBUG_DUMP_CONFIG */
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
new file mode 100644
index 000000000000..f0927ee53f29
--- /dev/null
+++ b/arch/alpha/kernel/entry.S
@@ -0,0 +1,957 @@
+/*
+ * arch/alpha/kernel/entry.S
+ *
+ * Kernel entry-points.
+ */
+
+#include <linux/config.h>
+#include <asm/asm_offsets.h>
+#include <asm/thread_info.h>
+#include <asm/pal.h>
+#include <asm/errno.h>
+#include <asm/unistd.h>
+
+ .text
+ .set noat
+
+/* Stack offsets. */
+#define SP_OFF 184
+#define SWITCH_STACK_SIZE 320
+
+/*
+ * This defines the normal kernel pt-regs layout.
+ *
+ * regs 9-15 preserved by C code
+ * regs 16-18 saved by PAL-code
+ * regs 29-30 saved and set up by PAL-code
+ * JRP - Save regs 16-18 in a special area of the stack, so that
+ * the palcode-provided values are available to the signal handler.
+ */
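+
+/*
+ * Editorial sketch (not part of the original change): the resulting
+ * frame, with offsets inferred from the SAVE_ALL stores below and the
+ * SP_OFF uses elsewhere in this file:
+ *
+ * 0-144 $0-$8, $19-$28 (saved by SAVE_ALL)
+ * 152 HAE
+ * 160-176 trap-time $16-$18 (the special area noted above)
+ * 184 PS <-- SP_OFF
+ * 192 PC
+ * 200 GP
+ * 208-224 $16-$18 (saved by PAL-code, or by entSys itself)
+ */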
+
+#define SAVE_ALL \
+ subq $sp, SP_OFF, $sp; \
+ stq $0, 0($sp); \
+ stq $1, 8($sp); \
+ stq $2, 16($sp); \
+ stq $3, 24($sp); \
+ stq $4, 32($sp); \
+ stq $28, 144($sp); \
+ lda $2, alpha_mv; \
+ stq $5, 40($sp); \
+ stq $6, 48($sp); \
+ stq $7, 56($sp); \
+ stq $8, 64($sp); \
+ stq $19, 72($sp); \
+ stq $20, 80($sp); \
+ stq $21, 88($sp); \
+ ldq $2, HAE_CACHE($2); \
+ stq $22, 96($sp); \
+ stq $23, 104($sp); \
+ stq $24, 112($sp); \
+ stq $25, 120($sp); \
+ stq $26, 128($sp); \
+ stq $27, 136($sp); \
+ stq $2, 152($sp); \
+ stq $16, 160($sp); \
+ stq $17, 168($sp); \
+ stq $18, 176($sp)
+
+#define RESTORE_ALL \
+ lda $19, alpha_mv; \
+ ldq $0, 0($sp); \
+ ldq $1, 8($sp); \
+ ldq $2, 16($sp); \
+ ldq $3, 24($sp); \
+ ldq $21, 152($sp); \
+ ldq $20, HAE_CACHE($19); \
+ ldq $4, 32($sp); \
+ ldq $5, 40($sp); \
+ ldq $6, 48($sp); \
+ ldq $7, 56($sp); \
+ subq $20, $21, $20; \
+ ldq $8, 64($sp); \
+ beq $20, 99f; \
+ ldq $20, HAE_REG($19); \
+ stq $21, HAE_CACHE($19); \
+ stq $21, 0($20); \
+ ldq $0, 0($sp); \
+ ldq $1, 8($sp); \
+99:; \
+ ldq $19, 72($sp); \
+ ldq $20, 80($sp); \
+ ldq $21, 88($sp); \
+ ldq $22, 96($sp); \
+ ldq $23, 104($sp); \
+ ldq $24, 112($sp); \
+ ldq $25, 120($sp); \
+ ldq $26, 128($sp); \
+ ldq $27, 136($sp); \
+ ldq $28, 144($sp); \
+ addq $sp, SP_OFF, $sp
+
+/*
+ * Non-syscall kernel entry points.
+ */
+
+ .align 4
+ .globl entInt
+ .ent entInt
+entInt:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $19
+ jsr $31, do_entInt
+.end entInt
+
+ .align 4
+ .globl entArith
+ .ent entArith
+entArith:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $18
+ jsr $31, do_entArith
+.end entArith
+
+ .align 4
+ .globl entMM
+ .ent entMM
+entMM:
+ SAVE_ALL
+/* save $9 - $15 so the inline exception code can manipulate them. */
+ subq $sp, 56, $sp
+ stq $9, 0($sp)
+ stq $10, 8($sp)
+ stq $11, 16($sp)
+ stq $12, 24($sp)
+ stq $13, 32($sp)
+ stq $14, 40($sp)
+ stq $15, 48($sp)
+ addq $sp, 56, $19
+/* handle the fault */
+ lda $8, 0x3fff
+ bic $sp, $8, $8
+ jsr $26, do_page_fault
+/* reload the registers after the exception code played. */
+ ldq $9, 0($sp)
+ ldq $10, 8($sp)
+ ldq $11, 16($sp)
+ ldq $12, 24($sp)
+ ldq $13, 32($sp)
+ ldq $14, 40($sp)
+ ldq $15, 48($sp)
+ addq $sp, 56, $sp
+/* finish up the syscall as normal. */
+ br ret_from_sys_call
+.end entMM
+
+ .align 4
+ .globl entIF
+ .ent entIF
+entIF:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $17
+ jsr $31, do_entIF
+.end entIF
+
+ .align 4
+ .globl entUna
+ .ent entUna
+entUna:
+ lda $sp, -256($sp)
+ stq $0, 0($sp)
+ ldq $0, 256($sp) /* get PS */
+ stq $1, 8($sp)
+ stq $2, 16($sp)
+ stq $3, 24($sp)
+ and $0, 8, $0 /* user mode? */
+ stq $4, 32($sp)
+ bne $0, entUnaUser /* yup -> do user-level unaligned fault */
+ stq $5, 40($sp)
+ stq $6, 48($sp)
+ stq $7, 56($sp)
+ stq $8, 64($sp)
+ stq $9, 72($sp)
+ stq $10, 80($sp)
+ stq $11, 88($sp)
+ stq $12, 96($sp)
+ stq $13, 104($sp)
+ stq $14, 112($sp)
+ stq $15, 120($sp)
+ /* 16-18 PAL-saved */
+ stq $19, 152($sp)
+ stq $20, 160($sp)
+ stq $21, 168($sp)
+ stq $22, 176($sp)
+ stq $23, 184($sp)
+ stq $24, 192($sp)
+ stq $25, 200($sp)
+ stq $26, 208($sp)
+ stq $27, 216($sp)
+ stq $28, 224($sp)
+ stq $gp, 232($sp)
+ lda $8, 0x3fff
+ stq $31, 248($sp)
+ bic $sp, $8, $8
+ jsr $26, do_entUna
+ ldq $0, 0($sp)
+ ldq $1, 8($sp)
+ ldq $2, 16($sp)
+ ldq $3, 24($sp)
+ ldq $4, 32($sp)
+ ldq $5, 40($sp)
+ ldq $6, 48($sp)
+ ldq $7, 56($sp)
+ ldq $8, 64($sp)
+ ldq $9, 72($sp)
+ ldq $10, 80($sp)
+ ldq $11, 88($sp)
+ ldq $12, 96($sp)
+ ldq $13, 104($sp)
+ ldq $14, 112($sp)
+ ldq $15, 120($sp)
+ /* 16-18 PAL-saved */
+ ldq $19, 152($sp)
+ ldq $20, 160($sp)
+ ldq $21, 168($sp)
+ ldq $22, 176($sp)
+ ldq $23, 184($sp)
+ ldq $24, 192($sp)
+ ldq $25, 200($sp)
+ ldq $26, 208($sp)
+ ldq $27, 216($sp)
+ ldq $28, 224($sp)
+ ldq $gp, 232($sp)
+ lda $sp, 256($sp)
+ call_pal PAL_rti
+.end entUna
+
+ .align 4
+ .ent entUnaUser
+entUnaUser:
+ ldq $0, 0($sp) /* restore original $0 */
+ lda $sp, 256($sp) /* pop entUna's stack frame */
+ SAVE_ALL /* setup normal kernel stack */
+ lda $sp, -56($sp)
+ stq $9, 0($sp)
+ stq $10, 8($sp)
+ stq $11, 16($sp)
+ stq $12, 24($sp)
+ stq $13, 32($sp)
+ stq $14, 40($sp)
+ stq $15, 48($sp)
+ lda $8, 0x3fff
+ addq $sp, 56, $19
+ bic $sp, $8, $8
+ jsr $26, do_entUnaUser
+ ldq $9, 0($sp)
+ ldq $10, 8($sp)
+ ldq $11, 16($sp)
+ ldq $12, 24($sp)
+ ldq $13, 32($sp)
+ ldq $14, 40($sp)
+ ldq $15, 48($sp)
+ lda $sp, 56($sp)
+ br ret_from_sys_call
+.end entUnaUser
+
+ .align 4
+ .globl entDbg
+ .ent entDbg
+entDbg:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $16
+ jsr $31, do_entDbg
+.end entDbg
+
+/*
+ * The system call entry point is special. Most importantly, it looks
+ * like a function call to userspace as far as clobbered registers are
+ * concerned. We do preserve the argument registers (for syscall
+ * restarts) and $26 (for leaf syscall functions).
+ *
+ * So much for theory. We don't take advantage of this yet.
+ *
+ * Note that a0-a2 are not saved by PALcode as with the other entry points.
+ */
+
+ .align 4
+ .globl entSys
+ .globl ret_from_sys_call
+ .ent entSys
+entSys:
+ SAVE_ALL
+ lda $8, 0x3fff
+ bic $sp, $8, $8
+ lda $4, NR_SYSCALLS($31)
+ stq $16, SP_OFF+24($sp)
+ lda $5, sys_call_table
+ lda $27, sys_ni_syscall
+ cmpult $0, $4, $4
+ ldl $3, TI_FLAGS($8)
+ stq $17, SP_OFF+32($sp)
+ s8addq $0, $5, $5
+ stq $18, SP_OFF+40($sp)
+ blbs $3, strace
+ beq $4, 1f
+ ldq $27, 0($5)
+1: jsr $26, ($27), alpha_ni_syscall
+ ldgp $gp, 0($26)
+ blt $0, $syscall_error /* the call failed */
+ stq $0, 0($sp)
+ stq $31, 72($sp) /* a3=0 => no error */
+
+ .align 4
+ret_from_sys_call:
+ cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
+ ldq $0, SP_OFF($sp)
+ and $0, 8, $0
+ beq $0, restore_all
+ret_from_reschedule:
+ /* Make sure need_resched and sigpending don't change between
+ sampling and the rti. */
+ lda $16, 7
+ call_pal PAL_swpipl
+ ldl $5, TI_FLAGS($8)
+ and $5, _TIF_WORK_MASK, $2
+ bne $2, work_pending
+restore_all:
+ RESTORE_ALL
+ call_pal PAL_rti
+
+ .align 3
+$syscall_error:
+ /*
+ * Some system calls (e.g., ptrace) can return arbitrary
+ * values which might normally be mistaken as error numbers.
+ * Those functions must zero $0 (v0) directly in the stack
+ * frame to indicate that a negative return value wasn't an
+ * error number.
+ */
+ ldq $19, 0($sp) /* old syscall nr (zero if success) */
+ beq $19, $ret_success
+
+ ldq $20, 72($sp) /* .. and this a3 */
+ subq $31, $0, $0 /* with error in v0 */
+ addq $31, 1, $1 /* set a3 for errno return */
+ stq $0, 0($sp)
+ mov $31, $26 /* tell "ret_from_sys_call" we can restart */
+ stq $1, 72($sp) /* a3 for return */
+ br ret_from_sys_call
+
+$ret_success:
+ stq $0, 0($sp)
+ stq $31, 72($sp) /* a3=0 => no error */
+ br ret_from_sys_call
+.end entSys
+
+/*
+ * Do all cleanup when returning from all interrupts and system calls.
+ *
+ * Arguments:
+ * $5: TI_FLAGS.
+ * $8: current.
+ * $19: The old syscall number, or zero if this is not a return
+ * from a syscall that errored and is possibly restartable.
+ * $20: Error indication.
+ */
+
+ .align 4
+ .ent work_pending
+work_pending:
+ and $5, _TIF_NEED_RESCHED, $2
+ beq $2, $work_notifysig
+
+$work_resched:
+ subq $sp, 16, $sp
+ stq $19, 0($sp) /* save syscall nr */
+ stq $20, 8($sp) /* and error indication (a3) */
+ jsr $26, schedule
+ ldq $19, 0($sp)
+ ldq $20, 8($sp)
+ addq $sp, 16, $sp
+ /* Make sure need_resched and sigpending don't change between
+ sampling and the rti. */
+ lda $16, 7
+ call_pal PAL_swpipl
+ ldl $5, TI_FLAGS($8)
+ and $5, _TIF_WORK_MASK, $2
+ beq $2, restore_all
+ and $5, _TIF_NEED_RESCHED, $2
+ bne $2, $work_resched
+
+$work_notifysig:
+ mov $sp, $17
+ br $1, do_switch_stack
+ mov $5, $21
+ mov $sp, $18
+ mov $31, $16
+ jsr $26, do_notify_resume
+ bsr $1, undo_switch_stack
+ br restore_all
+.end work_pending
+
+/*
+ * PTRACE syscall handler
+ */
+
+ .align 4
+ .ent strace
+strace:
+ /* set up signal stack, call syscall_trace */
+ bsr $1, do_switch_stack
+ jsr $26, syscall_trace
+ bsr $1, undo_switch_stack
+
+ /* get the system call number and the arguments back.. */
+ ldq $0, 0($sp)
+ ldq $16, SP_OFF+24($sp)
+ ldq $17, SP_OFF+32($sp)
+ ldq $18, SP_OFF+40($sp)
+ ldq $19, 72($sp)
+ ldq $20, 80($sp)
+ ldq $21, 88($sp)
+
+ /* get the system call pointer.. */
+ lda $1, NR_SYSCALLS($31)
+ lda $2, sys_call_table
+ lda $27, alpha_ni_syscall
+ cmpult $0, $1, $1
+ s8addq $0, $2, $2
+ beq $1, 1f
+ ldq $27, 0($2)
+1: jsr $26, ($27), sys_gettimeofday
+ ldgp $gp, 0($26)
+
+ /* check return.. */
+ blt $0, $strace_error /* the call failed */
+ stq $31, 72($sp) /* a3=0 => no error */
+$strace_success:
+ stq $0, 0($sp) /* save return value */
+
+ bsr $1, do_switch_stack
+ jsr $26, syscall_trace
+ bsr $1, undo_switch_stack
+ br $31, ret_from_sys_call
+
+ .align 3
+$strace_error:
+ ldq $19, 0($sp) /* old syscall nr (zero if success) */
+ beq $19, $strace_success
+ ldq $20, 72($sp) /* .. and this a3 */
+
+ subq $31, $0, $0 /* with error in v0 */
+ addq $31, 1, $1 /* set a3 for errno return */
+ stq $0, 0($sp)
+ stq $1, 72($sp) /* a3 for return */
+
+ bsr $1, do_switch_stack
+ mov $19, $9 /* save old syscall number */
+ mov $20, $10 /* save old a3 */
+ jsr $26, syscall_trace
+ mov $9, $19
+ mov $10, $20
+ bsr $1, undo_switch_stack
+
+ mov $31, $26 /* tell "ret_from_sys_call" we can restart */
+ br ret_from_sys_call
+.end strace
+
+/*
+ * Save and restore the switch stack -- aka the balance of the user context.
+ */
+
+ .align 4
+ .ent do_switch_stack
+do_switch_stack:
+ lda $sp, -SWITCH_STACK_SIZE($sp)
+ stq $9, 0($sp)
+ stq $10, 8($sp)
+ stq $11, 16($sp)
+ stq $12, 24($sp)
+ stq $13, 32($sp)
+ stq $14, 40($sp)
+ stq $15, 48($sp)
+ stq $26, 56($sp)
+ stt $f0, 64($sp)
+ stt $f1, 72($sp)
+ stt $f2, 80($sp)
+ stt $f3, 88($sp)
+ stt $f4, 96($sp)
+ stt $f5, 104($sp)
+ stt $f6, 112($sp)
+ stt $f7, 120($sp)
+ stt $f8, 128($sp)
+ stt $f9, 136($sp)
+ stt $f10, 144($sp)
+ stt $f11, 152($sp)
+ stt $f12, 160($sp)
+ stt $f13, 168($sp)
+ stt $f14, 176($sp)
+ stt $f15, 184($sp)
+ stt $f16, 192($sp)
+ stt $f17, 200($sp)
+ stt $f18, 208($sp)
+ stt $f19, 216($sp)
+ stt $f20, 224($sp)
+ stt $f21, 232($sp)
+ stt $f22, 240($sp)
+ stt $f23, 248($sp)
+ stt $f24, 256($sp)
+ stt $f25, 264($sp)
+ stt $f26, 272($sp)
+ stt $f27, 280($sp)
+ mf_fpcr $f0 # get fpcr
+ stt $f28, 288($sp)
+ stt $f29, 296($sp)
+ stt $f30, 304($sp)
+ stt $f0, 312($sp) # save fpcr in slot of $f31
+ ldt $f0, 64($sp) # don't let "do_switch_stack" change fp state.
+ ret $31, ($1), 1
+.end do_switch_stack
+
+ .align 4
+ .ent undo_switch_stack
+undo_switch_stack:
+ ldq $9, 0($sp)
+ ldq $10, 8($sp)
+ ldq $11, 16($sp)
+ ldq $12, 24($sp)
+ ldq $13, 32($sp)
+ ldq $14, 40($sp)
+ ldq $15, 48($sp)
+ ldq $26, 56($sp)
+ ldt $f30, 312($sp) # get saved fpcr
+ ldt $f0, 64($sp)
+ ldt $f1, 72($sp)
+ ldt $f2, 80($sp)
+ ldt $f3, 88($sp)
+ mt_fpcr $f30 # install saved fpcr
+ ldt $f4, 96($sp)
+ ldt $f5, 104($sp)
+ ldt $f6, 112($sp)
+ ldt $f7, 120($sp)
+ ldt $f8, 128($sp)
+ ldt $f9, 136($sp)
+ ldt $f10, 144($sp)
+ ldt $f11, 152($sp)
+ ldt $f12, 160($sp)
+ ldt $f13, 168($sp)
+ ldt $f14, 176($sp)
+ ldt $f15, 184($sp)
+ ldt $f16, 192($sp)
+ ldt $f17, 200($sp)
+ ldt $f18, 208($sp)
+ ldt $f19, 216($sp)
+ ldt $f20, 224($sp)
+ ldt $f21, 232($sp)
+ ldt $f22, 240($sp)
+ ldt $f23, 248($sp)
+ ldt $f24, 256($sp)
+ ldt $f25, 264($sp)
+ ldt $f26, 272($sp)
+ ldt $f27, 280($sp)
+ ldt $f28, 288($sp)
+ ldt $f29, 296($sp)
+ ldt $f30, 304($sp)
+ lda $sp, SWITCH_STACK_SIZE($sp)
+ ret $31, ($1), 1
+.end undo_switch_stack
+
+/*
+ * The meat of the context switch code.
+ */
+
+ .align 4
+ .globl alpha_switch_to
+ .ent alpha_switch_to
+alpha_switch_to:
+ .prologue 0
+ bsr $1, do_switch_stack
+ call_pal PAL_swpctx
+ lda $8, 0x3fff
+ bsr $1, undo_switch_stack
+ bic $sp, $8, $8
+ mov $17, $0
+ ret
+.end alpha_switch_to
+
+/*
+ * New processes begin life here.
+ */
+
+ .globl ret_from_fork
+ .align 4
+ .ent ret_from_fork
+ret_from_fork:
+ lda $26, ret_from_sys_call
+ mov $17, $16
+ jmp $31, schedule_tail
+.end ret_from_fork
+
+/*
+ * kernel_thread(fn, arg, clone_flags)
+ */
+ .align 4
+ .globl kernel_thread
+ .ent kernel_thread
+kernel_thread:
+ /* We can be called from a module. */
+ ldgp $gp, 0($27)
+ .prologue 1
+ subq $sp, SP_OFF+6*8, $sp
+ br $1, 2f /* load start address */
+
+ /* We've now "returned" from a fake system call. */
+ unop
+ blt $0, 1f /* error? */
+ ldi $1, 0x3fff
+ beq $20, 1f /* parent or child? */
+
+ bic $sp, $1, $8 /* in child. */
+ jsr $26, ($27)
+ ldgp $gp, 0($26)
+ mov $0, $16
+ mov $31, $26
+ jmp $31, sys_exit
+
+1: ret /* in parent. */
+
+ .align 4
+2: /* Fake a system call stack frame, as we can't do system calls
+ from kernel space. Note that we store FN and ARG as they
+ need to be set up in the child for the call. Also store $8
+ and $26 for use in the parent. */
+ stq $31, SP_OFF($sp) /* ps */
+ stq $1, SP_OFF+8($sp) /* pc */
+ stq $gp, SP_OFF+16($sp) /* gp */
+ stq $16, 136($sp) /* $27; FN for child */
+ stq $17, SP_OFF+24($sp) /* $16; ARG for child */
+ stq $8, 64($sp) /* $8 */
+ stq $26, 128($sp) /* $26 */
+ /* Avoid the HAE being gratuitously wrong, to avoid restoring it. */
+ ldq $2, alpha_mv+HAE_CACHE
+ stq $2, 152($sp) /* HAE */
+
+ /* Shuffle FLAGS to the front; add CLONE_VM. */
+ ldi $1, CLONE_VM|CLONE_UNTRACED
+ or $18, $1, $16
+ bsr $26, sys_clone
+
+ /* We don't actually care for a3 success widgetry in the kernel.
+ Nor for positive errno values. */
+ stq $0, 0($sp) /* $0 */
+ br restore_all
+.end kernel_thread
+
+/*
+ * execve(path, argv, envp)
+ */
+ .align 4
+ .globl execve
+ .ent execve
+execve:
+ /* We can be called from a module. */
+ ldgp $gp, 0($27)
+ lda $sp, -(32+SIZEOF_PT_REGS+8)($sp)
+ .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0
+ stq $26, 0($sp)
+ stq $16, 8($sp)
+ stq $17, 16($sp)
+ stq $18, 24($sp)
+ .prologue 1
+
+ lda $16, 32($sp)
+ lda $17, 0
+ lda $18, SIZEOF_PT_REGS
+ bsr $26, memset !samegp
+
+ /* Avoid the HAE being gratuitously wrong, which would cause us
+ to do the whole turn off interrupts thing and restore it. */
+ ldq $2, alpha_mv+HAE_CACHE
+ stq $2, 152+32($sp)
+
+ ldq $16, 8($sp)
+ ldq $17, 16($sp)
+ ldq $18, 24($sp)
+ lda $19, 32($sp)
+ bsr $26, do_execve !samegp
+
+ ldq $26, 0($sp)
+ bne $0, 1f /* error! */
+
+ /* Move the temporary pt_regs struct from its current location
+ to the top of the kernel stack frame. See copy_thread for
+ details for a normal process. */
+ lda $16, 0x4000 - SIZEOF_PT_REGS($8)
+ lda $17, 32($sp)
+ lda $18, SIZEOF_PT_REGS
+ bsr $26, memmove !samegp
+
+ /* Take that over as our new stack frame and visit userland! */
+ lda $sp, 0x4000 - SIZEOF_PT_REGS($8)
+ br $31, ret_from_sys_call
+
+1: lda $sp, 32+SIZEOF_PT_REGS+8($sp)
+ ret
+.end execve
+
+
+/*
+ * Special system calls. Most of these are special in that they either
+ * have to play switch_stack games or in some way use the pt_regs struct.
+ */
+ .align 4
+ .globl sys_fork
+ .ent sys_fork
+sys_fork:
+ .prologue 0
+ mov $sp, $21
+ bsr $1, do_switch_stack
+ bis $31, SIGCHLD, $16
+ mov $31, $17
+ mov $31, $18
+ mov $31, $19
+ mov $31, $20
+ jsr $26, alpha_clone
+ bsr $1, undo_switch_stack
+ ret
+.end sys_fork
+
+ .align 4
+ .globl sys_clone
+ .ent sys_clone
+sys_clone:
+ .prologue 0
+ mov $sp, $21
+ bsr $1, do_switch_stack
+ /* $16, $17, $18, $19, $20 come from the user. */
+ jsr $26, alpha_clone
+ bsr $1, undo_switch_stack
+ ret
+.end sys_clone
+
+ .align 4
+ .globl sys_vfork
+ .ent sys_vfork
+sys_vfork:
+ .prologue 0
+ mov $sp, $16
+ bsr $1, do_switch_stack
+ jsr $26, alpha_vfork
+ bsr $1, undo_switch_stack
+ ret
+.end sys_vfork
+
+ .align 4
+ .globl sys_sigreturn
+ .ent sys_sigreturn
+sys_sigreturn:
+ .prologue 0
+ mov $sp, $17
+ lda $18, -SWITCH_STACK_SIZE($sp)
+ lda $sp, -SWITCH_STACK_SIZE($sp)
+ jsr $26, do_sigreturn
+ br $1, undo_switch_stack
+ br ret_from_sys_call
+.end sys_sigreturn
+
+ .align 4
+ .globl sys_rt_sigreturn
+ .ent sys_rt_sigreturn
+sys_rt_sigreturn:
+ .prologue 0
+ mov $sp, $17
+ lda $18, -SWITCH_STACK_SIZE($sp)
+ lda $sp, -SWITCH_STACK_SIZE($sp)
+ jsr $26, do_rt_sigreturn
+ br $1, undo_switch_stack
+ br ret_from_sys_call
+.end sys_rt_sigreturn
+
+ .align 4
+ .globl sys_sigsuspend
+ .ent sys_sigsuspend
+sys_sigsuspend:
+ .prologue 0
+ mov $sp, $17
+ br $1, do_switch_stack
+ mov $sp, $18
+ subq $sp, 16, $sp
+ stq $26, 0($sp)
+ jsr $26, do_sigsuspend
+ ldq $26, 0($sp)
+ lda $sp, SWITCH_STACK_SIZE+16($sp)
+ ret
+.end sys_sigsuspend
+
+ .align 4
+ .globl sys_rt_sigsuspend
+ .ent sys_rt_sigsuspend
+sys_rt_sigsuspend:
+ .prologue 0
+ mov $sp, $18
+ br $1, do_switch_stack
+ mov $sp, $19
+ subq $sp, 16, $sp
+ stq $26, 0($sp)
+ jsr $26, do_rt_sigsuspend
+ ldq $26, 0($sp)
+ lda $sp, SWITCH_STACK_SIZE+16($sp)
+ ret
+.end sys_rt_sigsuspend
+
+ .align 4
+ .globl sys_sethae
+ .ent sys_sethae
+sys_sethae:
+ .prologue 0
+ stq $16, 152($sp)
+ ret
+.end sys_sethae
+
+ .align 4
+ .globl osf_getpriority
+ .ent osf_getpriority
+osf_getpriority:
+ lda $sp, -16($sp)
+ stq $26, 0($sp)
+ .prologue 0
+
+ jsr $26, sys_getpriority
+
+ ldq $26, 0($sp)
+ blt $0, 1f
+
+ /* Return value is the unbiased priority, i.e. 20 - prio.
+ This does result in negative return values, so signal
+ no error by writing into the R0 slot. */
+ lda $1, 20
+ stq $31, 16($sp)
+ subl $1, $0, $0
+ unop
+
+1: lda $sp, 16($sp)
+ ret
+.end osf_getpriority
+
+ .align 4
+ .globl sys_getxuid
+ .ent sys_getxuid
+sys_getxuid:
+ .prologue 0
+ ldq $2, TI_TASK($8)
+ ldl $0, TASK_UID($2)
+ ldl $1, TASK_EUID($2)
+ stq $1, 80($sp)
+ ret
+.end sys_getxuid
+
+ .align 4
+ .globl sys_getxgid
+ .ent sys_getxgid
+sys_getxgid:
+ .prologue 0
+ ldq $2, TI_TASK($8)
+ ldl $0, TASK_GID($2)
+ ldl $1, TASK_EGID($2)
+ stq $1, 80($sp)
+ ret
+.end sys_getxgid
+
+ .align 4
+ .globl sys_getxpid
+ .ent sys_getxpid
+sys_getxpid:
+ .prologue 0
+ ldq $2, TI_TASK($8)
+
+ /* See linux/kernel/timer.c sys_getppid for discussion
+ about this loop. */
+ ldq $3, TASK_REAL_PARENT($2)
+1: ldl $1, TASK_TGID($3)
+#ifdef CONFIG_SMP
+ mov $3, $4
+ mb
+ ldq $3, TASK_REAL_PARENT($2)
+ cmpeq $3, $4, $4
+ beq $4, 1b
+#endif
+ stq $1, 80($sp)
+ ldl $0, TASK_TGID($2)
+ ret
+.end sys_getxpid
+
+ .align 4
+ .globl sys_pipe
+ .ent sys_pipe
+sys_pipe:
+ lda $sp, -16($sp)
+ stq $26, 0($sp)
+ .prologue 0
+
+ lda $16, 8($sp)
+ jsr $26, do_pipe
+
+ ldq $26, 0($sp)
+ bne $0, 1f
+
+ /* The return values are in $0 and $20. */
+ ldl $1, 12($sp)
+ ldl $0, 8($sp)
+
+ stq $1, 80+16($sp)
+1: lda $sp, 16($sp)
+ ret
+.end sys_pipe
+
+ .align 4
+ .globl sys_ptrace
+ .ent sys_ptrace
+sys_ptrace:
+ .prologue 0
+ mov $sp, $20
+ jmp $31, do_sys_ptrace
+.end sys_ptrace
+
+ .align 4
+ .globl sys_execve
+ .ent sys_execve
+sys_execve:
+ .prologue 0
+ mov $sp, $19
+ jmp $31, do_sys_execve
+.end sys_execve
+
+ .align 4
+ .globl osf_sigprocmask
+ .ent osf_sigprocmask
+osf_sigprocmask:
+ .prologue 0
+ mov $sp, $18
+ jmp $31, do_osf_sigprocmask
+.end osf_sigprocmask
+
+ .align 4
+ .globl alpha_ni_syscall
+ .ent alpha_ni_syscall
+alpha_ni_syscall:
+ .prologue 0
+ /* Special because it also implements overflow handling via
+ syscall number 0. And if you recall, zero is a special
+ trigger for "not an error". Store large non-zero there. */
+ lda $0, -ENOSYS
+ unop
+ stq $0, 0($sp)
+ ret
+.end alpha_ni_syscall
diff --git a/arch/alpha/kernel/err_common.c b/arch/alpha/kernel/err_common.c
new file mode 100644
index 000000000000..687580b16b41
--- /dev/null
+++ b/arch/alpha/kernel/err_common.c
@@ -0,0 +1,321 @@
+/*
+ * linux/arch/alpha/kernel/err_common.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting Alpha systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+/*
+ * err_print_prefix -- error handling print routines should prefix
+ * all prints with this
+ */
+char *err_print_prefix = KERN_NOTICE;
+
+
+/*
+ * Generic
+ */
+void
+mchk_dump_mem(void *data, size_t length, char **annotation)
+{
+ unsigned long *ldata = data;
+ size_t i;
+
+ for (i = 0; (i * sizeof(*ldata)) < length; i++) {
+ if (annotation && !annotation[i])
+ annotation = NULL;
+ printk("%s %08x: %016lx %s\n",
+ err_print_prefix,
+ (unsigned)(i * sizeof(*ldata)), ldata[i],
+ annotation ? annotation[i] : "");
+ }
+}
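+
+/*
+ * Editorial sketch (not part of the original change): callers pass an
+ * annotation array parallel to the quadwords being dumped; a NULL
+ * entry turns annotation off from that point on. Hypothetical usage:
+ *
+ * static char *notes[] = { "FIRST", "SECOND", NULL };
+ * mchk_dump_mem(frame, 4 * sizeof(unsigned long), notes);
+ *
+ * labels quadwords 0 and 1 and prints 2 and 3 unannotated.
+ */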
+
+void
+mchk_dump_logout_frame(struct el_common *mchk_header)
+{
+ printk("%s -- Frame Header --\n"
+ " Frame Size: %d (0x%x) bytes\n"
+ " Flags: %s%s\n"
+ " MCHK Code: 0x%x\n"
+ " Frame Rev: %d\n"
+ " Proc Offset: 0x%08x\n"
+ " Sys Offset: 0x%08x\n"
+ " -- Processor Region --\n",
+ err_print_prefix,
+ mchk_header->size, mchk_header->size,
+ mchk_header->retry ? "RETRY " : "",
+ mchk_header->err2 ? "SECOND_ERR " : "",
+ mchk_header->code,
+ mchk_header->frame_rev,
+ mchk_header->proc_offset,
+ mchk_header->sys_offset);
+
+ mchk_dump_mem((void *)
+ ((unsigned long)mchk_header + mchk_header->proc_offset),
+ mchk_header->sys_offset - mchk_header->proc_offset,
+ NULL);
+
+ printk("%s -- System Region --\n", err_print_prefix);
+ mchk_dump_mem((void *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset),
+ mchk_header->size - mchk_header->sys_offset,
+ NULL);
+ printk("%s -- End of Frame --\n", err_print_prefix);
+}
+
+
+/*
+ * Console Data Log
+ */
+/* Data */
+static struct el_subpacket_handler *subpacket_handler_list = NULL;
+static struct el_subpacket_annotation *subpacket_annotation_list = NULL;
+
+static struct el_subpacket *
+el_process_header_subpacket(struct el_subpacket *header)
+{
+ union el_timestamp timestamp;
+ char *name = "UNKNOWN EVENT";
+ int packet_count = 0;
+ int length = 0;
+
+ if (header->class != EL_CLASS__HEADER) {
+ printk("%s** Unexpected header CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ switch(header->type) {
+ case EL_TYPE__HEADER__SYSTEM_ERROR_FRAME:
+ name = "SYSTEM ERROR";
+ length = header->by_type.sys_err.frame_length;
+ packet_count =
+ header->by_type.sys_err.frame_packet_count;
+ timestamp.as_int = 0;
+ break;
+ case EL_TYPE__HEADER__SYSTEM_EVENT_FRAME:
+ name = "SYSTEM EVENT";
+ length = header->by_type.sys_event.frame_length;
+ packet_count =
+ header->by_type.sys_event.frame_packet_count;
+ timestamp = header->by_type.sys_event.timestamp;
+ break;
+ case EL_TYPE__HEADER__HALT_FRAME:
+ name = "ERROR HALT";
+ length = header->by_type.err_halt.frame_length;
+ packet_count =
+ header->by_type.err_halt.frame_packet_count;
+ timestamp = header->by_type.err_halt.timestamp;
+ break;
+ case EL_TYPE__HEADER__LOGOUT_FRAME:
+ name = "LOGOUT FRAME";
+ length = header->by_type.logout_header.frame_length;
+ packet_count = 1;
+ timestamp.as_int = 0;
+ break;
+ default: /* Unknown */
+ printk("%s** Unknown header - CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ printk("%s*** %s:\n"
+ " CLASS %d, TYPE %d\n",
+ err_print_prefix,
+ name,
+ header->class, header->type);
+ el_print_timestamp(&timestamp);
+
+ /*
+ * Process the subpackets
+ */
+ el_process_subpackets(header, packet_count);
+
+ /* return the next header */
+ header = (struct el_subpacket *)
+ ((unsigned long)header + header->length + length);
+ return header;
+}
+
+static struct el_subpacket *
+el_process_subpacket_reg(struct el_subpacket *header)
+{
+ struct el_subpacket *next = NULL;
+ struct el_subpacket_handler *h = subpacket_handler_list;
+
+ for (; h && h->class != header->class; h = h->next);
+ if (h) next = h->handler(header);
+
+ return next;
+}
+
+void
+el_print_timestamp(union el_timestamp *timestamp)
+{
+ if (timestamp->as_int)
+ printk("%s TIMESTAMP: %d/%d/%02d %d:%02d:%0d\n",
+ err_print_prefix,
+ timestamp->b.month, timestamp->b.day,
+ timestamp->b.year, timestamp->b.hour,
+ timestamp->b.minute, timestamp->b.second);
+}
+
+void
+el_process_subpackets(struct el_subpacket *header, int packet_count)
+{
+ struct el_subpacket *subpacket;
+ int i;
+
+ subpacket = (struct el_subpacket *)
+ ((unsigned long)header + header->length);
+
+ for (i = 0; subpacket && i < packet_count; i++) {
+ printk("%sPROCESSING SUBPACKET %d\n", err_print_prefix, i);
+ subpacket = el_process_subpacket(subpacket);
+ }
+}
+
+struct el_subpacket *
+el_process_subpacket(struct el_subpacket *header)
+{
+ struct el_subpacket *next = NULL;
+
+ switch(header->class) {
+ case EL_CLASS__TERMINATION:
+ /* Termination packet, there are no more */
+ break;
+ case EL_CLASS__HEADER:
+ next = el_process_header_subpacket(header);
+ break;
+ default:
+ if (NULL == (next = el_process_subpacket_reg(header))) {
+ printk("%s** Unexpected header CLASS %d TYPE %d"
+ " -- aborting.\n",
+ err_print_prefix,
+ header->class, header->type);
+ }
+ break;
+ }
+
+ return next;
+}
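+
+/*
+ * Illustrative sketch (editorial, not part of the original change):
+ * subpackets are variable-length records chained through their length
+ * field, which is all the walking above amounts to. Hypothetical
+ * helper:
+ */
+static inline struct el_subpacket *
+el_next_subpacket_example(struct el_subpacket *s)
+{
+ return (struct el_subpacket *)((unsigned long)s + s->length);
+}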
+
+void
+el_annotate_subpacket(struct el_subpacket *header)
+{
+ struct el_subpacket_annotation *a;
+ char **annotation = NULL;
+
+ for (a = subpacket_annotation_list; a; a = a->next) {
+ if (a->class == header->class &&
+ a->type == header->type &&
+ a->revision == header->revision) {
+ /*
+ * We found the annotation
+ */
+ annotation = a->annotation;
+ printk("%s %s\n", err_print_prefix, a->description);
+ break;
+ }
+ }
+
+ mchk_dump_mem(header, header->length, annotation);
+}
+
+static void __init
+cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu)
+{
+ struct el_subpacket *header = (struct el_subpacket *)
+ (IDENT_ADDR | pcpu->console_data_log_pa);
+ int err;
+
+ printk("%s******* CONSOLE DATA LOG FOR CPU %d. *******\n"
+ "*** Error(s) were logged on a previous boot\n",
+ err_print_prefix, cpu);
+
+ for (err = 0; header && (header->class != EL_CLASS__TERMINATION); err++)
+ header = el_process_subpacket(header);
+
+ /* let the console know it's ok to clear the error(s) at restart */
+ pcpu->console_data_log_pa = 0;
+
+ printk("%s*** %d total error(s) logged\n"
+ "**** END OF CONSOLE DATA LOG FOR CPU %d ****\n",
+ err_print_prefix, err, cpu);
+}
+
+void __init
+cdl_check_console_data_log(void)
+{
+ struct percpu_struct *pcpu;
+ unsigned long cpu;
+
+ for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) {
+ pcpu = (struct percpu_struct *)
+ ((unsigned long)hwrpb + hwrpb->processor_offset
+ + cpu * hwrpb->processor_size);
+ if (pcpu->console_data_log_pa)
+ cdl_process_console_data_log(cpu, pcpu);
+ }
+}
+
+int __init
+cdl_register_subpacket_annotation(struct el_subpacket_annotation *new)
+{
+ struct el_subpacket_annotation *a = subpacket_annotation_list;
+
+ if (a == NULL) subpacket_annotation_list = new;
+ else {
+ for (; a->next != NULL; a = a->next) {
+ if ((a->class == new->class && a->type == new->type) ||
+ a == new) {
+ printk("Attempted to re-register "
+ "subpacket annotation\n");
+ return -EINVAL;
+ }
+ }
+ a->next = new;
+ }
+ new->next = NULL;
+
+ return 0;
+}
+
+int __init
+cdl_register_subpacket_handler(struct el_subpacket_handler *new)
+{
+ struct el_subpacket_handler *h = subpacket_handler_list;
+
+ if (h == NULL) subpacket_handler_list = new;
+ else {
+ for (; h->next != NULL; h = h->next) {
+ if (h->class == new->class || h == new) {
+ printk("Attempted to re-register "
+ "subpacket handler\n");
+ return -EINVAL;
+ }
+ }
+ h->next = new;
+ }
+ new->next = NULL;
+
+ return 0;
+}
+
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
new file mode 100644
index 000000000000..64f59f2fcf5c
--- /dev/null
+++ b/arch/alpha/kernel/err_ev6.c
@@ -0,0 +1,274 @@
+/*
+ * linux/arch/alpha/kernel/err_ev6.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting Alpha systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev6.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+static int
+ev6_parse_ibox(u64 i_stat, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+
+#define EV6__I_STAT__PAR (1UL << 29)
+#define EV6__I_STAT__ERRMASK (EV6__I_STAT__PAR)
+
+ if (!(i_stat & EV6__I_STAT__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ if (!print)
+ return status;
+
+ if (i_stat & EV6__I_STAT__PAR)
+ printk("%s Icache parity error\n", err_print_prefix);
+
+ return status;
+}
+
+static int
+ev6_parse_mbox(u64 mm_stat, u64 d_stat, u64 c_stat, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+
+#define EV6__MM_STAT__DC_TAG_PERR (1UL << 10)
+#define EV6__MM_STAT__ERRMASK (EV6__MM_STAT__DC_TAG_PERR)
+#define EV6__D_STAT__TPERR_P0 (1UL << 0)
+#define EV6__D_STAT__TPERR_P1 (1UL << 1)
+#define EV6__D_STAT__ECC_ERR_ST (1UL << 2)
+#define EV6__D_STAT__ECC_ERR_LD (1UL << 3)
+#define EV6__D_STAT__SEO (1UL << 4)
+#define EV6__D_STAT__ERRMASK (EV6__D_STAT__TPERR_P0 | \
+ EV6__D_STAT__TPERR_P1 | \
+ EV6__D_STAT__ECC_ERR_ST | \
+ EV6__D_STAT__ECC_ERR_LD | \
+ EV6__D_STAT__SEO)
+
+ if (!(d_stat & EV6__D_STAT__ERRMASK) &&
+ !(mm_stat & EV6__MM_STAT__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ if (!print)
+ return status;
+
+ if (mm_stat & EV6__MM_STAT__DC_TAG_PERR)
+ printk("%s Dcache tag parity error on probe\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__TPERR_P0)
+ printk("%s Dcache tag parity error - pipe 0\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__TPERR_P1)
+ printk("%s Dcache tag parity error - pipe 1\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__ECC_ERR_ST)
+ printk("%s ECC error occurred on a store\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__ECC_ERR_LD)
+ printk("%s ECC error occurred on a %s load\n",
+ err_print_prefix,
+ c_stat ? "" : "speculative ");
+ if (d_stat & EV6__D_STAT__SEO)
+ printk("%s Dcache second error\n", err_print_prefix);
+
+ return status;
+}
+
+static int
+ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
+ u64 c_stat, u64 c_sts, int print)
+{
+ char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "MEMORY", "BCACHE", "DCACHE",
+ "BCACHE PROBE", "BCACHE PROBE" };
+ char *streamname[] = { "D", "I" };
+ char *bitsname[] = { "SINGLE", "DOUBLE" };
+ int status = MCHK_DISPOSITION_REPORT;
+ int source = -1, stream = -1, bits = -1;
+
+#define EV6__C_STAT__BC_PERR (0x01)
+#define EV6__C_STAT__DC_PERR (0x02)
+#define EV6__C_STAT__DSTREAM_MEM_ERR (0x03)
+#define EV6__C_STAT__DSTREAM_BC_ERR (0x04)
+#define EV6__C_STAT__DSTREAM_DC_ERR (0x05)
+#define EV6__C_STAT__PROBE_BC_ERR0 (0x06) /* both 6 and 7 indicate... */
+#define EV6__C_STAT__PROBE_BC_ERR1 (0x07) /* ...probe bc error. */
+#define EV6__C_STAT__ISTREAM_MEM_ERR (0x0B)
+#define EV6__C_STAT__ISTREAM_BC_ERR (0x0C)
+#define EV6__C_STAT__DSTREAM_MEM_DBL (0x13)
+#define EV6__C_STAT__DSTREAM_BC_DBL (0x14)
+#define EV6__C_STAT__ISTREAM_MEM_DBL (0x1B)
+#define EV6__C_STAT__ISTREAM_BC_DBL (0x1C)
+#define EV6__C_STAT__SOURCE_MEMORY (0x03)
+#define EV6__C_STAT__SOURCE_BCACHE (0x04)
+#define EV6__C_STAT__SOURCE__S (0)
+#define EV6__C_STAT__SOURCE__M (0x07)
+#define EV6__C_STAT__ISTREAM__S (3)
+#define EV6__C_STAT__ISTREAM__M (0x01)
+#define EV6__C_STAT__DOUBLE__S (4)
+#define EV6__C_STAT__DOUBLE__M (0x01)
+#define EV6__C_STAT__ERRMASK (0x1F)
+#define EV6__C_STS__SHARED (1 << 0)
+#define EV6__C_STS__DIRTY (1 << 1)
+#define EV6__C_STS__VALID (1 << 2)
+#define EV6__C_STS__PARITY (1 << 3)
+
+ if (!(c_stat & EV6__C_STAT__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ if (!print)
+ return status;
+
+ source = EXTRACT(c_stat, EV6__C_STAT__SOURCE);
+ stream = EXTRACT(c_stat, EV6__C_STAT__ISTREAM);
+ bits = EXTRACT(c_stat, EV6__C_STAT__DOUBLE);
+
+ if (c_stat & EV6__C_STAT__BC_PERR) {
+ printk("%s Bcache tag parity error\n", err_print_prefix);
+ source = -1;
+ }
+
+ if (c_stat & EV6__C_STAT__DC_PERR) {
+ printk("%s Dcache tag parity error\n", err_print_prefix);
+ source = -1;
+ }
+
+ if (c_stat == EV6__C_STAT__PROBE_BC_ERR0 ||
+ c_stat == EV6__C_STAT__PROBE_BC_ERR1) {
+ printk("%s Bcache single-bit error on a probe hit\n",
+ err_print_prefix);
+ source = -1;
+ }
+
+ if (source != -1)
+ printk("%s %s-STREAM %s-BIT ECC error from %s\n",
+ err_print_prefix,
+ streamname[stream], bitsname[bits], sourcename[source]);
+
+ printk("%s Address: 0x%016lx\n"
+ " Syndrome[upper.lower]: %02lx.%02lx\n",
+ err_print_prefix,
+ c_addr,
+ c2_syn, c1_syn);
+
+ if (source == EV6__C_STAT__SOURCE_MEMORY ||
+ source == EV6__C_STAT__SOURCE_BCACHE)
+ printk("%s Block status: %s%s%s%s\n",
+ err_print_prefix,
+ (c_sts & EV6__C_STS__SHARED) ? "SHARED " : "",
+ (c_sts & EV6__C_STS__DIRTY) ? "DIRTY " : "",
+ (c_sts & EV6__C_STS__VALID) ? "VALID " : "",
+ (c_sts & EV6__C_STS__PARITY) ? "PARITY " : "");
+
+ return status;
+}
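+
+/*
+ * Editorial note (not part of the original change): a worked decode
+ * under the definitions above. C_STAT = 0x13 (DSTREAM_MEM_DBL):
+ * SOURCE = 0x13 & 7 = 3 (MEMORY), ISTREAM = (0x13 >> 3) & 1 = 0 ("D"),
+ * DOUBLE = (0x13 >> 4) & 1 = 1 ("DOUBLE"), so the report reads
+ * "D-STREAM DOUBLE-BIT ECC error from MEMORY".
+ */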
+
+void
+ev6_register_error_handlers(void)
+{
+ /* None right now. */
+}
+
+int
+ev6_process_logout_frame(struct el_common *mchk_header, int print)
+{
+ struct el_common_EV6_mcheck *ev6mchk =
+ (struct el_common_EV6_mcheck *)mchk_header;
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ status |= ev6_parse_ibox(ev6mchk->I_STAT, print);
+ status |= ev6_parse_mbox(ev6mchk->MM_STAT, ev6mchk->DC_STAT,
+ ev6mchk->C_STAT, print);
+ status |= ev6_parse_cbox(ev6mchk->C_ADDR, ev6mchk->DC1_SYNDROME,
+ ev6mchk->DC0_SYNDROME, ev6mchk->C_STAT,
+ ev6mchk->C_STS, print);
+
+ if (!print)
+ return status;
+
+ if (status != MCHK_DISPOSITION_DISMISS) {
+ char *saved_err_prefix = err_print_prefix;
+
+ /*
+ * Dump some additional information from the frame
+ */
+ printk("%s EXC_ADDR: 0x%016lx IER_CM: 0x%016lx"
+ " ISUM: 0x%016lx\n"
+ " PAL_BASE: 0x%016lx I_CTL: 0x%016lx"
+ " PCTX: 0x%016lx\n",
+ err_print_prefix,
+ ev6mchk->EXC_ADDR, ev6mchk->IER_CM, ev6mchk->ISUM,
+ ev6mchk->PAL_BASE, ev6mchk->I_CTL, ev6mchk->PCTX);
+
+ if (status == MCHK_DISPOSITION_UNKNOWN_ERROR) {
+ printk("%s UNKNOWN error, frame follows:\n",
+ err_print_prefix);
+ } else {
+ /* had decode -- downgrade print level for frame */
+ err_print_prefix = KERN_NOTICE;
+ }
+
+ mchk_dump_logout_frame(mchk_header);
+
+ err_print_prefix = saved_err_prefix;
+ }
+
+ return status;
+}
+
+void
+ev6_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ /*
+ * Parse the logout frame without printing first. If the only error(s)
+ * found have a disposition of "dismiss", just dismiss them and
+ * don't print any message.
+ */
+ if (ev6_process_logout_frame(mchk_header, 0) !=
+ MCHK_DISPOSITION_DISMISS) {
+ char *saved_err_prefix = err_print_prefix;
+ err_print_prefix = KERN_CRIT;
+
+ /*
+ * Either a nondismissable error was detected or no
+ * recognized error was detected in the logout frame
+ * -- report the error in either case
+ */
+ printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d:\n",
+ err_print_prefix,
+ (vector == SCB_Q_PROCERR)?"Correctable":"Uncorrectable",
+ (unsigned int)vector, (int)smp_processor_id());
+
+ ev6_process_logout_frame(mchk_header, 1);
+ dik_show_regs(regs, NULL);
+
+ err_print_prefix = saved_err_prefix;
+ }
+
+ /*
+ * Release the logout frame
+ */
+ wrmces(0x7);
+ mb();
+}
+
diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c
new file mode 100644
index 000000000000..bf52ba691957
--- /dev/null
+++ b/arch/alpha/kernel/err_ev7.c
@@ -0,0 +1,289 @@
+/*
+ * linux/arch/alpha/kernel/err_ev7.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting Alpha systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev7.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+struct ev7_lf_subpackets *
+ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr,
+ struct ev7_lf_subpackets *lf_subpackets)
+{
+ struct el_subpacket *subpacket;
+ int i;
+
+ /*
+ * A Marvel machine check frame is always packaged in an
+ * el_subpacket of class HEADER, type LOGOUT_FRAME.
+ */
+ if (el_ptr->class != EL_CLASS__HEADER ||
+ el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME)
+ return NULL;
+
+ /*
+ * It is a logout frame header. Look at the one subpacket.
+ */
+ el_ptr = (struct el_subpacket *)
+ ((unsigned long)el_ptr + el_ptr->length);
+
+ /*
+ * It has to be class PAL, type LOGOUT_FRAME.
+ */
+ if (el_ptr->class != EL_CLASS__PAL ||
+ el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME)
+ return NULL;
+
+ lf_subpackets->logout = (struct ev7_pal_logout_subpacket *)
+ el_ptr->by_type.raw.data_start;
+
+ /*
+ * Process the subpackets.
+ */
+ subpacket = (struct el_subpacket *)
+ ((unsigned long)el_ptr + el_ptr->length);
+ for (i = 0;
+ subpacket && i < lf_subpackets->logout->subpacket_count;
+ subpacket = (struct el_subpacket *)
+ ((unsigned long)subpacket + subpacket->length), i++) {
+ /*
+ * All subpackets should be class PAL.
+ */
+ if (subpacket->class != EL_CLASS__PAL) {
+ printk("%s**UNEXPECTED SUBPACKET CLASS %d "
+ "IN LOGOUT FRAME (packet %d\n",
+ err_print_prefix, subpacket->class, i);
+ return NULL;
+ }
+
+ /*
+ * Remember the subpacket.
+ */
+ switch(subpacket->type) {
+ case EL_TYPE__PAL__EV7_PROCESSOR:
+ lf_subpackets->ev7 =
+ (struct ev7_pal_processor_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__EV7_RBOX:
+ lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__EV7_ZBOX:
+ lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__EV7_IO:
+ lf_subpackets->io = (struct ev7_pal_io_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE:
+ case EL_TYPE__PAL__ENV__AIRMOVER_FAN:
+ case EL_TYPE__PAL__ENV__VOLTAGE:
+ case EL_TYPE__PAL__ENV__INTRUSION:
+ case EL_TYPE__PAL__ENV__POWER_SUPPLY:
+ case EL_TYPE__PAL__ENV__LAN:
+ case EL_TYPE__PAL__ENV__HOT_PLUG:
+ lf_subpackets->env[ev7_lf_env_index(subpacket->type)] =
+ (struct ev7_pal_environmental_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ default:
+ /*
+ * Don't know what kind of frame this is.
+ */
+ return NULL;
+ }
+ }
+
+ return lf_subpackets;
+}
+
+void
+ev7_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
+ char *saved_err_prefix = err_print_prefix;
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ err_print_prefix = KERN_CRIT;
+ printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n",
+ err_print_prefix,
+ (vector == SCB_Q_PROCERR) ? "Correctable" : "Uncorrectable",
+ (unsigned int)vector, (int)smp_processor_id());
+ el_process_subpacket(el_ptr);
+ err_print_prefix = saved_err_prefix;
+
+ /*
+ * Release the logout frame
+ */
+ wrmces(0x7);
+ mb();
+}
+
+static char *el_ev7_processor_subpacket_annotation[] = {
+ "Subpacket Header", "I_STAT", "DC_STAT",
+ "C_ADDR", "C_SYNDROME_1", "C_SYNDROME_0",
+ "C_STAT", "C_STS", "MM_STAT",
+ "EXC_ADDR", "IER_CM", "ISUM",
+ "PAL_BASE", "I_CTL", "PROCESS_CONTEXT",
+ "CBOX_CTL", "CBOX_STP_CTL", "CBOX_ACC_CTL",
+ "CBOX_LCL_SET", "CBOX_GLB_SET", "BBOX_CTL",
+ "BBOX_ERR_STS", "BBOX_ERR_IDX", "CBOX_DDP_ERR_STS",
+ "BBOX_DAT_RMP", NULL
+};
+
+static char *el_ev7_zbox_subpacket_annotation[] = {
+ "Subpacket Header",
+ "ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
+ "ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
+ "ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR",
+ "ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL",
+ "ZBOX(0): reserved / DIFT_ERR_STATUS",
+ "ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
+ "ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
+ "ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR",
+ "ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL",
+ "ZBOX(1): reserved / DIFT_ERR_STATUS",
+ "CBOX_CTL", "CBOX_STP_CTL",
+ "ZBOX(0)_ERROR_PA", "ZBOX(1)_ERROR_PA",
+ "ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME",
+ NULL
+};
+
+static char *el_ev7_rbox_subpacket_annotation[] = {
+ "Subpacket Header", "RBOX_CFG", "RBOX_N_CFG",
+ "RBOX_S_CFG", "RBOX_E_CFG", "RBOX_W_CFG",
+ "RBOX_N_ERR", "RBOX_S_ERR", "RBOX_E_ERR",
+ "RBOX_W_ERR", "RBOX_IO_CFG", "RBOX_IO_ERR",
+ "RBOX_L_ERR", "RBOX_WHOAMI", "RBOX_IMASL",
+ "RBOX_INTQ", "RBOX_INT", NULL
+};
+
+static char *el_ev7_io_subpacket_annotation[] = {
+ "Subpacket Header", "IO_ASIC_REV", "IO_SYS_REV",
+ "IO7_UPH", "HPI_CTL", "CRD_CTL",
+ "HEI_CTL", "PO7_ERROR_SUM","PO7_UNCRR_SYM",
+ "PO7_CRRCT_SYM", "PO7_UGBGE_SYM","PO7_ERR_PKT0",
+ "PO7_ERR_PKT1", "reserved", "reserved",
+ "PO0_ERR_SUM", "PO0_TLB_ERR", "PO0_SPL_COMPLT",
+ "PO0_TRANS_SUM", "PO0_FIRST_ERR","PO0_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ "PO1_ERR_SUM", "PO1_TLB_ERR", "PO1_SPL_COMPLT",
+ "PO1_TRANS_SUM", "PO1_FIRST_ERR","PO1_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ "PO2_ERR_SUM", "PO2_TLB_ERR", "PO2_SPL_COMPLT",
+ "PO2_TRANS_SUM", "PO2_FIRST_ERR","PO2_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ "PO3_ERR_SUM", "PO3_TLB_ERR", "PO3_SPL_COMPLT",
+ "PO3_TRANS_SUM", "PO3_FIRST_ERR","PO3_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ NULL
+};
+
+static struct el_subpacket_annotation el_ev7_pal_annotations[] = {
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_PROCESSOR,
+ 1,
+ "EV7 Processor Subpacket",
+ el_ev7_processor_subpacket_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_ZBOX,
+ 1,
+ "EV7 ZBOX Subpacket",
+ el_ev7_zbox_subpacket_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_RBOX,
+ 1,
+ "EV7 RBOX Subpacket",
+ el_ev7_rbox_subpacket_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_IO,
+ 1,
+ "EV7 IO Subpacket",
+ el_ev7_io_subpacket_annotation)
+};
+
+static struct el_subpacket *
+ev7_process_pal_subpacket(struct el_subpacket *header)
+{
+ struct ev7_pal_subpacket *packet;
+
+ if (header->class != EL_CLASS__PAL) {
+ printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ packet = (struct ev7_pal_subpacket *)header->by_type.raw.data_start;
+
+ switch(header->type) {
+ case EL_TYPE__PAL__LOGOUT_FRAME:
+ printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n",
+ err_print_prefix,
+ packet->by_type.logout.whami,
+ packet->by_type.logout.rbox_whami);
+ el_print_timestamp(&packet->by_type.logout.timestamp);
+ printk("%s EXC_ADDR: %016lx\n"
+ " HALT_CODE: %lx\n",
+ err_print_prefix,
+ packet->by_type.logout.exc_addr,
+ packet->by_type.logout.halt_code);
+ el_process_subpackets(header,
+ packet->by_type.logout.subpacket_count);
+ break;
+ default:
+ printk("%s ** PAL TYPE %d SUBPACKET\n",
+ err_print_prefix,
+ header->type);
+ el_annotate_subpacket(header);
+ break;
+ }
+
+ return (struct el_subpacket *)((unsigned long)header + header->length);
+}
+
+struct el_subpacket_handler ev7_pal_subpacket_handler =
+ SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
+
+void
+ev7_register_error_handlers(void)
+{
+ int i;
+
+	for (i = 0; i < ARRAY_SIZE(el_ev7_pal_annotations); i++)
+		cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]);
+ cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
+}
+
diff --git a/arch/alpha/kernel/err_impl.h b/arch/alpha/kernel/err_impl.h
new file mode 100644
index 000000000000..64e9b73809fa
--- /dev/null
+++ b/arch/alpha/kernel/err_impl.h
@@ -0,0 +1,85 @@
+/*
+ * linux/arch/alpha/kernel/err_impl.h
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Contains declarations and macros to support Alpha error handling
+ * implementations.
+ */
+
+union el_timestamp;
+struct el_subpacket;
+struct ev7_lf_subpackets;
+
+struct el_subpacket_annotation {
+ struct el_subpacket_annotation *next;
+ u16 class;
+ u16 type;
+ u16 revision;
+ char *description;
+ char **annotation;
+};
+#define SUBPACKET_ANNOTATION(c, t, r, d, a) {NULL, (c), (t), (r), (d), (a)}
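+/*
+ * The annotation array labels successive quadwords of the subpacket
+ * body, one string per quadword, and is NULL-terminated -- see the
+ * el_ev7_*_annotation tables in err_ev7.c.
+ */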
+
+struct el_subpacket_handler {
+ struct el_subpacket_handler *next;
+ u16 class;
+ struct el_subpacket *(*handler)(struct el_subpacket *);
+};
+#define SUBPACKET_HANDLER_INIT(c, h) {NULL, (c), (h)}
+
+/*
+ * Manipulate a field from a register given its name. Defines
+ * for the LSB (__S - shift count) and bitmask (__M) are required.
+ *
+ * EXTRACT(u, f) - extracts the field and places it at bit position 0
+ * GEN_MASK(f) - creates an in-position mask for the field
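+ *
+ * For example, for a hypothetical field FOO occupying bits <11:4>:
+ *
+ *   #define FOO__S (4)
+ *   #define FOO__M (0xff)
+ *
+ *   EXTRACT(0x1230, FOO) == 0x23, GEN_MASK(FOO) == 0xff0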
+ */
+#define EXTRACT(u, f) (((u) >> f##__S) & f##__M)
+#define GEN_MASK(f) ((u64)f##__M << f##__S)
+
+/*
+ * err_common.c
+ */
+extern char *err_print_prefix;
+
+extern void mchk_dump_mem(void *, size_t, char **);
+extern void mchk_dump_logout_frame(struct el_common *);
+extern void el_print_timestamp(union el_timestamp *);
+extern void el_process_subpackets(struct el_subpacket *, int);
+extern struct el_subpacket *el_process_subpacket(struct el_subpacket *);
+extern void el_annotate_subpacket(struct el_subpacket *);
+extern void cdl_check_console_data_log(void);
+extern int cdl_register_subpacket_annotation(struct el_subpacket_annotation *);
+extern int cdl_register_subpacket_handler(struct el_subpacket_handler *);
+
+/*
+ * err_ev7.c
+ */
+extern struct ev7_lf_subpackets *
+ev7_collect_logout_frame_subpackets(struct el_subpacket *,
+ struct ev7_lf_subpackets *);
+extern void ev7_register_error_handlers(void);
+extern void ev7_machine_check(u64, u64, struct pt_regs *);
+
+/*
+ * err_ev6.c
+ */
+extern void ev6_register_error_handlers(void);
+extern int ev6_process_logout_frame(struct el_common *, int);
+extern void ev6_machine_check(u64, u64, struct pt_regs *);
+
+/*
+ * err_marvel.c
+ */
+extern void marvel_machine_check(u64, u64, struct pt_regs *);
+extern void marvel_register_error_handlers(void);
+
+/*
+ * err_titan.c
+ */
+extern int titan_process_logout_frame(struct el_common *, int);
+extern void titan_machine_check(u64, u64, struct pt_regs *);
+extern void titan_register_error_handlers(void);
+extern int privateer_process_logout_frame(struct el_common *, int);
+extern void privateer_machine_check(u64, u64, struct pt_regs *);
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
new file mode 100644
index 000000000000..70b38b1d2af3
--- /dev/null
+++ b/arch/alpha/kernel/err_marvel.c
@@ -0,0 +1,1159 @@
+/*
+ * linux/arch/alpha/kernel/err_marvel.c
+ *
+ * Copyright (C) 2001 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/console.h>
+#include <asm/core_marvel.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev7.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+static void
+marvel_print_680_frame(struct ev7_lf_subpackets *lf_subpackets)
+{
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct ev7_pal_environmental_subpacket *env;
+ struct { int type; char *name; } ev_packets[] = {
+ { EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE,
+ "Ambient Temperature" },
+ { EL_TYPE__PAL__ENV__AIRMOVER_FAN,
+ "AirMover / Fan" },
+ { EL_TYPE__PAL__ENV__VOLTAGE,
+ "Voltage" },
+ { EL_TYPE__PAL__ENV__INTRUSION,
+ "Intrusion" },
+ { EL_TYPE__PAL__ENV__POWER_SUPPLY,
+ "Power Supply" },
+ { EL_TYPE__PAL__ENV__LAN,
+ "LAN" },
+ { EL_TYPE__PAL__ENV__HOT_PLUG,
+ "Hot Plug" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; ev_packets[i].type != 0; i++) {
+ env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)];
+ if (!env)
+ continue;
+
+ printk("%s**%s event (cabinet %d, drawer %d)\n",
+ err_print_prefix,
+ ev_packets[i].name,
+ env->cabinet,
+ env->drawer);
+ printk("%s Module Type: 0x%x - Unit ID 0x%x - "
+ "Condition 0x%x\n",
+ err_print_prefix,
+ env->module_type,
+ env->unit_id,
+ env->condition);
+ }
+#endif /* CONFIG_VERBOSE_MCHECK */
+}
+
+static int
+marvel_process_680_frame(struct ev7_lf_subpackets *lf_subpackets, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+ int i;
+
+ for (i = ev7_lf_env_index(EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE);
+ i <= ev7_lf_env_index(EL_TYPE__PAL__ENV__HOT_PLUG);
+ i++) {
+ if (lf_subpackets->env[i])
+ status = MCHK_DISPOSITION_REPORT;
+ }
+
+ if (print)
+ marvel_print_680_frame(lf_subpackets);
+
+ return status;
+}
+
+#ifdef CONFIG_VERBOSE_MCHECK
+
+static void
+marvel_print_err_cyc(u64 err_cyc)
+{
+ static char *packet_desc[] = {
+ "No Error",
+ "UNKNOWN",
+ "1 cycle (1 or 2 flit packet)",
+ "2 cycles (3 flit packet)",
+ "9 cycles (18 flit packet)",
+ "10 cycles (19 flit packet)",
+ "UNKNOWN",
+ "UNKNOWN",
+ "UNKNOWN"
+ };
+
+#define IO7__ERR_CYC__ODD_FLT (1UL << 0)
+#define IO7__ERR_CYC__EVN_FLT (1UL << 1)
+#define IO7__ERR_CYC__PACKET__S (6)
+#define IO7__ERR_CYC__PACKET__M (0x7)
+#define IO7__ERR_CYC__LOC (1UL << 5)
+#define IO7__ERR_CYC__CYCLE__S (2)
+#define IO7__ERR_CYC__CYCLE__M (0x7)
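+/*
+ * Bit layout implied by the defines above: ODD_FLT/EVN_FLT in <1:0>,
+ * CYCLE in <4:2>, LOC in <5>, PACKET in <8:6>.
+ */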
+
+ printk("%s Packet In Error: %s\n"
+ "%s Error in %s, cycle %ld%s%s\n",
+ err_print_prefix,
+ packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
+ err_print_prefix,
+ (err_cyc & IO7__ERR_CYC__LOC) ? "DATA" : "HEADER",
+ EXTRACT(err_cyc, IO7__ERR_CYC__CYCLE),
+ (err_cyc & IO7__ERR_CYC__ODD_FLT) ? " [ODD Flit]": "",
+ (err_cyc & IO7__ERR_CYC__EVN_FLT) ? " [Even Flit]": "");
+}
+
+static void
+marvel_print_po7_crrct_sym(u64 crrct_sym)
+{
+#define IO7__PO7_CRRCT_SYM__SYN__S (0)
+#define IO7__PO7_CRRCT_SYM__SYN__M (0x7f)
+#define IO7__PO7_CRRCT_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT + EVN_FLT */
+#define IO7__PO7_CRRCT_SYM__ERR_CYC__M (0x1ff)
+
+
+ printk("%s Correctable Error Symptoms:\n"
+ "%s Syndrome: 0x%lx\n",
+ err_print_prefix,
+ err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN));
+ marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC));
+}
+
+static void
+marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask)
+{
+ static char *clk_names[] = { "_h[0]", "_h[1]", "_n[0]", "_n[1]" };
+ static char *clk_decode[] = {
+ "No Error",
+ "One extra rising edge",
+ "Two extra rising edges",
+ "Lost one clock"
+ };
+ static char *port_names[] = { "Port 0", "Port 1",
+ "Port 2", "Port 3",
+ "Unknown Port", "Unknown Port",
+ "Unknown Port", "Port 7" };
+ int scratch, i;
+
+#define IO7__PO7_UNCRR_SYM__SYN__S (0)
+#define IO7__PO7_UNCRR_SYM__SYN__M (0x7f)
+#define IO7__PO7_UNCRR_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT... */
+#define IO7__PO7_UNCRR_SYM__ERR_CYC__M (0x1ff) /* ... + EVN_FLT */
+#define IO7__PO7_UNCRR_SYM__CLK__S (16)
+#define IO7__PO7_UNCRR_SYM__CLK__M (0xff)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ (1UL << 24)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO (1UL << 25)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO (1UL << 26)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK (1UL << 27)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK (1UL << 28)
+#define IO7__PO7_UNCRR_SYM__OVF__READIO (1UL << 29)
+#define IO7__PO7_UNCRR_SYM__OVF__WRITEIO (1UL << 30)
+#define IO7__PO7_UNCRR_SYM__OVF__FWD (1UL << 31)
+#define IO7__PO7_UNCRR_SYM__VICTIM_SP__S (32)
+#define IO7__PO7_UNCRR_SYM__VICTIM_SP__M (0xff)
+#define IO7__PO7_UNCRR_SYM__DETECT_SP__S (40)
+#define IO7__PO7_UNCRR_SYM__DETECT_SP__M (0xff)
+#define IO7__PO7_UNCRR_SYM__STRV_VTR__S (48)
+#define IO7__PO7_UNCRR_SYM__STRV_VTR__M (0x3ff)
+
+#define IO7__STRV_VTR__LSI__INTX__S (0)
+#define IO7__STRV_VTR__LSI__INTX__M (0x3)
+#define IO7__STRV_VTR__LSI__SLOT__S (2)
+#define IO7__STRV_VTR__LSI__SLOT__M (0x7)
+#define IO7__STRV_VTR__LSI__BUS__S (5)
+#define IO7__STRV_VTR__LSI__BUS__M (0x3)
+#define IO7__STRV_VTR__MSI__INTNUM__S (0)
+#define IO7__STRV_VTR__MSI__INTNUM__M (0x1ff)
+#define IO7__STRV_VTR__IS_MSI (1UL << 9)
+
+ printk("%s Uncorrectable Error Symptoms:\n", err_print_prefix);
+ uncrr_sym &= valid_mask;
+
+ if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN))
+ printk("%s Syndrome: 0x%lx\n",
+ err_print_prefix,
+ EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN));
+
+ if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__ERR_CYC))
+ marvel_print_err_cyc(EXTRACT(uncrr_sym,
+ IO7__PO7_UNCRR_SYM__ERR_CYC));
+
+ scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__CLK);
+ for (i = 0; i < 4; i++, scratch >>= 2) {
+ if (scratch & 0x3)
+ printk("%s Clock %s: %s\n",
+ err_print_prefix,
+ clk_names[i], clk_decode[scratch & 0x3]);
+ }
+
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ)
+ printk("%s REQ Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO)
+ printk("%s RIO Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO)
+ printk("%s WIO Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK)
+ printk("%s BLK Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK)
+ printk("%s NBK Credit Timeout or Overflow\n",
+ err_print_prefix);
+
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__READIO)
+ printk("%s Read I/O Buffer Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__WRITEIO)
+ printk("%s Write I/O Buffer Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__FWD)
+ printk("%s FWD Buffer Overflow\n",
+ err_print_prefix);
+
+ if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__VICTIM_SP))) {
+ int lost = scratch & (1UL << 4);
+ scratch &= ~lost;
+ for (i = 0; i < 8; i++, scratch >>= 1) {
+ if (!(scratch & 1))
+ continue;
+			printk("%s      Error Response sent to %s\n",
+			       err_print_prefix, port_names[i]);
+ }
+ if (lost)
+ printk("%s Lost Error sent somewhere else\n",
+ err_print_prefix);
+ }
+
+ if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__DETECT_SP))) {
+ for (i = 0; i < 8; i++, scratch >>= 1) {
+ if (!(scratch & 1))
+ continue;
+			printk("%s      Error Reported by %s\n",
+			       err_print_prefix, port_names[i]);
+ }
+ }
+
+ if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__STRV_VTR)) {
+ char starvation_message[80];
+
+ scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__STRV_VTR);
+ if (scratch & IO7__STRV_VTR__IS_MSI)
+ sprintf(starvation_message,
+ "MSI Interrupt 0x%x",
+ EXTRACT(scratch, IO7__STRV_VTR__MSI__INTNUM));
+ else
+ sprintf(starvation_message,
+				"LSI INT%c for Bus:Slot (%d:%d)",
+ 'A' + EXTRACT(scratch,
+ IO7__STRV_VTR__LSI__INTX),
+ EXTRACT(scratch, IO7__STRV_VTR__LSI__BUS),
+ EXTRACT(scratch, IO7__STRV_VTR__LSI__SLOT));
+
+ printk("%s Starvation Int Trigger By: %s\n",
+ err_print_prefix, starvation_message);
+ }
+}
+
+static void
+marvel_print_po7_ugbge_sym(u64 ugbge_sym)
+{
+ char opcode_str[10];
+
+#define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__S (6)
+#define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__M (0xfffffffful)
+#define IO7__PO7_UGBGE_SYM__UPH_OPCODE__S (40)
+#define IO7__PO7_UGBGE_SYM__UPH_OPCODE__M (0xff)
+#define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__S (48)
+#define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__M (0xf)
+#define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__S (52)
+#define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__M (0x7ff)
+#define IO7__PO7_UGBGE_SYM__VALID (1UL << 63)
+
+ if (!(ugbge_sym & IO7__PO7_UGBGE_SYM__VALID))
+ return;
+
+ switch(EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) {
+ case 0x51:
+ sprintf(opcode_str, "Wr32");
+ break;
+ case 0x50:
+ sprintf(opcode_str, "WrQW");
+ break;
+ case 0x54:
+ sprintf(opcode_str, "WrIPR");
+ break;
+ case 0xD8:
+ sprintf(opcode_str, "Victim");
+ break;
+ case 0xC5:
+ sprintf(opcode_str, "BlkIO");
+ break;
+ default:
+		sprintf(opcode_str, "0x%lx",
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE));
+ break;
+ }
+
+ printk("%s Up Hose Garbage Symptom:\n"
+ "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n",
+ err_print_prefix,
+ err_print_prefix,
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_DEST_PID),
+ opcode_str);
+
+ if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE))
+ printk("%s Packet Offset 0x%08lx\n",
+ err_print_prefix,
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF));
+}
+
+static void
+marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io)
+{
+ u64 uncrr_sym_valid = 0;
+
+#define IO7__PO7_ERRSUM__CR_SBE (1UL << 32)
+#define IO7__PO7_ERRSUM__CR_SBE2 (1UL << 33)
+#define IO7__PO7_ERRSUM__CR_PIO_WBYTE (1UL << 34)
+#define IO7__PO7_ERRSUM__CR_CSR_NXM (1UL << 35)
+#define IO7__PO7_ERRSUM__CR_RPID_ACV (1UL << 36)
+#define IO7__PO7_ERRSUM__CR_RSP_NXM (1UL << 37)
+#define IO7__PO7_ERRSUM__CR_ERR_RESP (1UL << 38)
+#define IO7__PO7_ERRSUM__CR_CLK_DERR (1UL << 39)
+#define IO7__PO7_ERRSUM__CR_DAT_DBE (1UL << 40)
+#define IO7__PO7_ERRSUM__CR_DAT_GRBG (1UL << 41)
+#define IO7__PO7_ERRSUM__MAF_TO (1UL << 42)
+#define IO7__PO7_ERRSUM__UGBGE (1UL << 43)
+#define IO7__PO7_ERRSUM__UN_MAF_LOST (1UL << 44)
+#define IO7__PO7_ERRSUM__UN_PKT_OVF (1UL << 45)
+#define IO7__PO7_ERRSUM__UN_CDT_OVF (1UL << 46)
+#define IO7__PO7_ERRSUM__UN_DEALLOC (1UL << 47)
+#define IO7__PO7_ERRSUM__BH_CDT_TO (1UL << 51)
+#define IO7__PO7_ERRSUM__BH_CLK_HDR (1UL << 52)
+#define IO7__PO7_ERRSUM__BH_DBE_HDR (1UL << 53)
+#define IO7__PO7_ERRSUM__BH_GBG_HDR (1UL << 54)
+#define IO7__PO7_ERRSUM__BH_BAD_CMD (1UL << 55)
+#define IO7__PO7_ERRSUM__HLT_INT (1UL << 56)
+#define IO7__PO7_ERRSUM__HP_INT (1UL << 57)
+#define IO7__PO7_ERRSUM__CRD_INT (1UL << 58)
+#define IO7__PO7_ERRSUM__STV_INT (1UL << 59)
+#define IO7__PO7_ERRSUM__HRD_INT (1UL << 60)
+#define IO7__PO7_ERRSUM__BH_SUM (1UL << 61)
+#define IO7__PO7_ERRSUM__ERR_LST (1UL << 62)
+#define IO7__PO7_ERRSUM__ERR_VALID (1UL << 63)
+
+#define IO7__PO7_ERRSUM__ERR_MASK (IO7__PO7_ERRSUM__ERR_VALID | \
+ IO7__PO7_ERRSUM__CR_SBE)
+
+ /*
+ * Single bit errors aren't covered by ERR_VALID.
+ */
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) {
+ printk("%s %sSingle Bit Error(s) detected/corrected\n",
+ err_print_prefix,
+ (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2)
+ ? "Multiple " : "");
+ marvel_print_po7_crrct_sym(io->po7_crrct_sym);
+ }
+
+ /*
+ * Neither are the interrupt status bits
+ */
+	if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT)
+		printk("%s      Halt Interrupt posted\n", err_print_prefix);
+	if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) {
+		printk("%s      Hot Plug Event Interrupt posted\n",
+		       err_print_prefix);
+		uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP);
+	}
+	if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT)
+		printk("%s      Correctable Error Interrupt posted\n",
+		       err_print_prefix);
+	if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) {
+		printk("%s      Starvation Interrupt posted\n",
+		       err_print_prefix);
+		uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__STRV_VTR);
+	}
+	if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) {
+		printk("%s      Hard Error Interrupt posted\n",
+		       err_print_prefix);
+		uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP);
+	}
+
+ /*
+ * Everything else is valid only with ERR_VALID, so skip to the end
+ * (uncrr_sym check) unless ERR_VALID is set.
+ */
+ if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID))
+ goto check_uncrr_sym;
+
+ /*
+ * Since ERR_VALID is set, VICTIM_SP in uncrr_sym is valid.
+ * For bits [29:0] to also be valid, the following bits must
+ * not be set:
+ * CR_PIO_WBYTE CR_CSR_NXM CR_RSP_NXM
+ * CR_ERR_RESP MAF_TO
+ */
+ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__VICTIM_SP);
+ if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE |
+ IO7__PO7_ERRSUM__CR_CSR_NXM |
+ IO7__PO7_ERRSUM__CR_RSP_NXM |
+ IO7__PO7_ERRSUM__CR_ERR_RESP |
+ IO7__PO7_ERRSUM__MAF_TO)))
+ uncrr_sym_valid |= 0x3ffffffful;
+
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE)
+ printk("%s Write byte into IO7 CSR\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM)
+ printk("%s PIO to non-existent CSR\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV)
+ printk("%s Bus Requester PID (Access Violation)\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM)
+ printk("%s Received NXM response from EV7\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP)
+ printk("%s Received ERROR RESPONSE\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR)
+ printk("%s Clock error on data flit\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE)
+ printk("%s Double Bit Error Data Error Detected\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG)
+ printk("%s Garbage Encoding Detected on the data\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) {
+ printk("%s Garbage Encoding sent up hose\n",
+ err_print_prefix);
+ marvel_print_po7_ugbge_sym(io->po7_ugbge_sym);
+ }
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST)
+ printk("%s Orphan response (unexpected response)\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF)
+ printk("%s Down hose packet overflow\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF)
+ printk("%s Down hose credit overflow\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC)
+ printk("%s Unexpected or bad dealloc field\n",
+ err_print_prefix);
+
+ /*
+ * The black hole events.
+ */
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO)
+ printk("%s BLACK HOLE: Timeout for all responses\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO)
+ printk("%s BLACK HOLE: Credit Timeout\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR)
+ printk("%s BLACK HOLE: Clock check on header\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR)
+ printk("%s BLACK HOLE: Uncorrectable Error on header\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR)
+ printk("%s BLACK HOLE: Garbage on header\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD)
+ printk("%s BLACK HOLE: Bad EV7 command\n",
+ err_print_prefix);
+
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST)
+ printk("%s Lost Error\n", err_print_prefix);
+
+ printk("%s Failing Packet:\n"
+ "%s Cycle 1: %016lx\n"
+ "%s Cycle 2: %016lx\n",
+ err_print_prefix,
+ err_print_prefix, io->po7_err_pkt0,
+ err_print_prefix, io->po7_err_pkt1);
+ /*
+ * If there are any valid bits in UNCRR sym for this err,
+ * print UNCRR_SYM as well.
+ */
+check_uncrr_sym:
+ if (uncrr_sym_valid)
+ marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid);
+}
+
+static void
+marvel_print_pox_tlb_err(u64 tlb_err)
+{
+ static char *tlb_errors[] = {
+ "No Error",
+ "North Port Signaled Error fetching TLB entry",
+ "PTE invalid or UCC or GBG error on this entry",
+ "Address did not hit any DMA window"
+ };
+
+#define IO7__POX_TLBERR__ERR_VALID (1UL << 63)
+#define IO7__POX_TLBERR__ERRCODE__S (0)
+#define IO7__POX_TLBERR__ERRCODE__M (0x3)
+#define IO7__POX_TLBERR__ERR_TLB_PTR__S (3)
+#define IO7__POX_TLBERR__ERR_TLB_PTR__M (0x7)
+#define IO7__POX_TLBERR__FADDR__S (6)
+#define IO7__POX_TLBERR__FADDR__M (0x3fffffffffful)
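+/*
+ * FADDR holds physical address bits <47:6>; the "<< 6" in the printk
+ * below rebuilds the 64-byte-aligned faulting address.
+ */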
+
+ if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID))
+ return;
+
+ printk("%s TLB Error on index 0x%lx:\n"
+ "%s - %s\n"
+ "%s - Addr: 0x%016lx\n",
+ err_print_prefix,
+ EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR),
+ err_print_prefix,
+ tlb_errors[EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE)],
+ err_print_prefix,
+ EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6);
+}
+
+static void
+marvel_print_pox_spl_cmplt(u64 spl_cmplt)
+{
+ char message[80];
+
+#define IO7__POX_SPLCMPLT__MESSAGE__S (0)
+#define IO7__POX_SPLCMPLT__MESSAGE__M (0x0fffffffful)
+#define IO7__POX_SPLCMPLT__SOURCE_BUS__S (40)
+#define IO7__POX_SPLCMPLT__SOURCE_BUS__M (0xfful)
+#define IO7__POX_SPLCMPLT__SOURCE_DEV__S (35)
+#define IO7__POX_SPLCMPLT__SOURCE_DEV__M (0x1ful)
+#define IO7__POX_SPLCMPLT__SOURCE_FUNC__S (32)
+#define IO7__POX_SPLCMPLT__SOURCE_FUNC__M (0x07ul)
+
+#define IO7__POX_SPLCMPLT__MSG_CLASS__S (28)
+#define IO7__POX_SPLCMPLT__MSG_CLASS__M (0xf)
+#define IO7__POX_SPLCMPLT__MSG_INDEX__S (20)
+#define IO7__POX_SPLCMPLT__MSG_INDEX__M (0xff)
+#define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__S (20)
+#define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__M (0xfff)
+#define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__S (12)
+#define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__M (0x7f)
+#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__S (0)
+#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff)
+
+ printk("%s Split Completion Error:\n"
+ "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n",
+ err_print_prefix,
+ err_print_prefix,
+ EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
+ EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_DEV),
+ EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_FUNC));
+
+ switch(EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MSG_CLASSINDEX)) {
+ case 0x000:
+ sprintf(message, "Normal completion");
+ break;
+ case 0x100:
+ sprintf(message, "Bridge - Master Abort");
+ break;
+ case 0x101:
+ sprintf(message, "Bridge - Target Abort");
+ break;
+ case 0x102:
+ sprintf(message, "Bridge - Uncorrectable Write Data Error");
+ break;
+ case 0x200:
+ sprintf(message, "Byte Count Out of Range");
+ break;
+ case 0x201:
+ sprintf(message, "Uncorrectable Split Write Data Error");
+ break;
+ default:
+		sprintf(message, "%08lx",
+ EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE));
+ break;
+ }
+ printk("%s Message: %s\n", err_print_prefix, message);
+}
+
+static void
+marvel_print_pox_trans_sum(u64 trans_sum)
+{
+ char *pcix_cmd[] = { "Interrupt Acknowledge",
+ "Special Cycle",
+ "I/O Read",
+ "I/O Write",
+ "Reserved",
+ "Reserved / Device ID Message",
+ "Memory Read",
+ "Memory Write",
+ "Reserved / Alias to Memory Read Block",
+ "Reserved / Alias to Memory Write Block",
+ "Configuration Read",
+ "Configuration Write",
+ "Memory Read Multiple / Split Completion",
+ "Dual Address Cycle",
+ "Memory Read Line / Memory Read Block",
+ "Memory Write and Invalidate / Memory Write Block"
+ };
+
+#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
+#define IO7__POX_TRANSUM__PCI_ADDR__M (0x3fffffffffffful)
+#define IO7__POX_TRANSUM__DAC (1UL << 50)
+#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__S (52)
+#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__M (0xf)
+#define IO7__POX_TRANSUM__PCIX_CMD__S (56)
+#define IO7__POX_TRANSUM__PCIX_CMD__M (0xf)
+#define IO7__POX_TRANSUM__ERR_VALID (1UL << 63)
+
+ if (!(trans_sum & IO7__POX_TRANSUM__ERR_VALID))
+ return;
+
+ printk("%s Transaction Summary:\n"
+ "%s Command: 0x%lx - %s\n"
+ "%s Address: 0x%016lx%s\n"
+ "%s PCI-X Master Slot: 0x%lx\n",
+ err_print_prefix,
+ err_print_prefix,
+ EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD),
+ pcix_cmd[EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD)],
+ err_print_prefix,
+ EXTRACT(trans_sum, IO7__POX_TRANSUM__PCI_ADDR),
+ (trans_sum & IO7__POX_TRANSUM__DAC) ? " (DAC)" : "",
+ err_print_prefix,
+ EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_MASTER_SLOT));
+}
+
+static void
+marvel_print_pox_err(u64 err_sum, struct ev7_pal_io_one_port *port)
+{
+#define IO7__POX_ERRSUM__AGP_REQQ_OVFL (1UL << 4)
+#define IO7__POX_ERRSUM__AGP_SYNC_ERR (1UL << 5)
+#define IO7__POX_ERRSUM__MRETRY_TO (1UL << 6)
+#define IO7__POX_ERRSUM__PCIX_UX_SPL (1UL << 7)
+#define IO7__POX_ERRSUM__PCIX_SPLIT_TO (1UL << 8)
+#define IO7__POX_ERRSUM__PCIX_DISCARD_SPL (1UL << 9)
+#define IO7__POX_ERRSUM__DMA_RD_TO (1UL << 10)
+#define IO7__POX_ERRSUM__CSR_NXM_RD (1UL << 11)
+#define IO7__POX_ERRSUM__CSR_NXM_WR (1UL << 12)
+#define IO7__POX_ERRSUM__DMA_TO (1UL << 13)
+#define IO7__POX_ERRSUM__ALL_MABORTS (1UL << 14)
+#define IO7__POX_ERRSUM__MABORT (1UL << 15)
+#define IO7__POX_ERRSUM__MABORT_MASK (IO7__POX_ERRSUM__ALL_MABORTS|\
+ IO7__POX_ERRSUM__MABORT)
+#define IO7__POX_ERRSUM__PT_TABORT (1UL << 16)
+#define IO7__POX_ERRSUM__PM_TABORT (1UL << 17)
+#define IO7__POX_ERRSUM__TABORT_MASK (IO7__POX_ERRSUM__PT_TABORT | \
+ IO7__POX_ERRSUM__PM_TABORT)
+#define IO7__POX_ERRSUM__SERR (1UL << 18)
+#define IO7__POX_ERRSUM__ADDRERR_STB (1UL << 19)
+#define IO7__POX_ERRSUM__DETECTED_SERR (1UL << 20)
+#define IO7__POX_ERRSUM__PERR (1UL << 21)
+#define IO7__POX_ERRSUM__DATAERR_STB_NIOW (1UL << 22)
+#define IO7__POX_ERRSUM__DETECTED_PERR (1UL << 23)
+#define IO7__POX_ERRSUM__PM_PERR (1UL << 24)
+#define IO7__POX_ERRSUM__PT_SCERROR (1UL << 26)
+#define IO7__POX_ERRSUM__HUNG_BUS (1UL << 28)
+#define IO7__POX_ERRSUM__UPE_ERROR__S (51)
+#define IO7__POX_ERRSUM__UPE_ERROR__M (0xffUL)
+#define IO7__POX_ERRSUM__UPE_ERROR GEN_MASK(IO7__POX_ERRSUM__UPE_ERROR)
+#define IO7__POX_ERRSUM__TLB_ERR (1UL << 59)
+#define IO7__POX_ERRSUM__ERR_VALID (1UL << 63)
+
+#define IO7__POX_ERRSUM__TRANS_SUM__MASK (IO7__POX_ERRSUM__MRETRY_TO | \
+ IO7__POX_ERRSUM__PCIX_UX_SPL | \
+ IO7__POX_ERRSUM__PCIX_SPLIT_TO | \
+ IO7__POX_ERRSUM__DMA_TO | \
+ IO7__POX_ERRSUM__MABORT_MASK | \
+ IO7__POX_ERRSUM__TABORT_MASK | \
+ IO7__POX_ERRSUM__SERR | \
+ IO7__POX_ERRSUM__ADDRERR_STB | \
+ IO7__POX_ERRSUM__PERR | \
+ IO7__POX_ERRSUM__DATAERR_STB_NIOW |\
+ IO7__POX_ERRSUM__DETECTED_PERR | \
+ IO7__POX_ERRSUM__PM_PERR | \
+ IO7__POX_ERRSUM__PT_SCERROR | \
+ IO7__POX_ERRSUM__UPE_ERROR)
+
+ if (!(err_sum & IO7__POX_ERRSUM__ERR_VALID))
+ return;
+
+ /*
+ * First the transaction summary errors
+ */
+ if (err_sum & IO7__POX_ERRSUM__MRETRY_TO)
+ printk("%s IO7 Master Retry Timeout expired\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PCIX_UX_SPL)
+ printk("%s Unexpected Split Completion\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PCIX_SPLIT_TO)
+ printk("%s IO7 Split Completion Timeout expired\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__DMA_TO)
+ printk("%s Hung bus during DMA transaction\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__MABORT_MASK)
+ printk("%s Master Abort\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PT_TABORT)
+ printk("%s IO7 Asserted Target Abort\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PM_TABORT)
+ printk("%s IO7 Received Target Abort\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__ADDRERR_STB) {
+ printk("%s Address or PCI-X Attribute Parity Error\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__SERR)
+ printk("%s IO7 Asserted SERR\n", err_print_prefix);
+ }
+ if (err_sum & IO7__POX_ERRSUM__PERR) {
+ if (err_sum & IO7__POX_ERRSUM__DATAERR_STB_NIOW)
+ printk("%s IO7 Detected Data Parity Error\n",
+ err_print_prefix);
+ else
+ printk("%s Split Completion Response with "
+ "Parity Error\n", err_print_prefix);
+ }
+ if (err_sum & IO7__POX_ERRSUM__DETECTED_PERR)
+ printk("%s PERR detected\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PM_PERR)
+ printk("%s PERR while IO7 is master\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PT_SCERROR) {
+ printk("%s IO7 Received Split Completion Error message\n",
+ err_print_prefix);
+ marvel_print_pox_spl_cmplt(port->pox_spl_cmplt);
+ }
+ if (err_sum & IO7__POX_ERRSUM__UPE_ERROR) {
+ unsigned int upe_error = EXTRACT(err_sum,
+ IO7__POX_ERRSUM__UPE_ERROR);
+ int i;
+ static char *upe_errors[] = {
+ "Parity Error on MSI write data",
+			"MSI read (MSI window is write only)",
+ "TLB - Invalid WR transaction",
+ "TLB - Invalid RD transaction",
+ "DMA - WR error (see north port)",
+ "DMA - RD error (see north port)",
+ "PPR - WR error (see north port)",
+ "PPR - RD error (see north port)"
+ };
+
+ printk("%s UPE Error:\n", err_print_prefix);
+ for (i = 0; i < 8; i++) {
+ if (upe_error & (1 << i))
+ printk("%s %s\n", err_print_prefix,
+ upe_errors[i]);
+ }
+ }
+
+ /*
+ * POx_TRANS_SUM, if appropriate.
+ */
+ if (err_sum & IO7__POX_ERRSUM__TRANS_SUM__MASK)
+ marvel_print_pox_trans_sum(port->pox_trans_sum);
+
+ /*
+ * Then TLB_ERR.
+ */
+ if (err_sum & IO7__POX_ERRSUM__TLB_ERR) {
+ printk("%s TLB ERROR\n", err_print_prefix);
+ marvel_print_pox_tlb_err(port->pox_tlb_err);
+ }
+
+ /*
+ * And the single bit status errors.
+ */
+ if (err_sum & IO7__POX_ERRSUM__AGP_REQQ_OVFL)
+ printk("%s AGP Request Queue Overflow\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__AGP_SYNC_ERR)
+ printk("%s AGP Sync Error\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PCIX_DISCARD_SPL)
+ printk("%s Discarded split completion\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__DMA_RD_TO)
+ printk("%s DMA Read Timeout\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__CSR_NXM_RD)
+ printk("%s CSR NXM READ\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__CSR_NXM_WR)
+ printk("%s CSR NXM WRITE\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__DETECTED_SERR)
+ printk("%s SERR detected\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__HUNG_BUS)
+ printk("%s HUNG BUS detected\n", err_print_prefix);
+}
+
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+static struct ev7_pal_io_subpacket *
+marvel_find_io7_with_error(struct ev7_lf_subpackets *lf_subpackets)
+{
+ struct ev7_pal_io_subpacket *io = lf_subpackets->io;
+ struct io7 *io7;
+ int i;
+
+ /*
+ * Caller must provide the packet to fill
+ */
+ if (!io)
+ return NULL;
+
+ /*
+ * Fill the subpacket with the console's standard fill pattern
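+	 * (0x55 bytes), so fields that are never written below stand out
+	 * in any later dump.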
+ */
+ memset(io, 0x55, sizeof(*io));
+
+ for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) {
+ unsigned long err_sum = 0;
+
+ err_sum |= io7->csrs->PO7_ERROR_SUM.csr;
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ if (!io7->ports[i].enabled)
+ continue;
+ err_sum |= io7->ports[i].csrs->POx_ERR_SUM.csr;
+ }
+
+ /*
+ * Is there at least one error?
+ */
+ if (err_sum & (1UL << 63))
+ break;
+ }
+
+ /*
+ * Did we find an IO7 with an error?
+ */
+ if (!io7)
+ return NULL;
+
+ /*
+ * We have an IO7 with an error.
+ *
+ * Fill in the IO subpacket.
+ */
+ io->io_asic_rev = io7->csrs->IO_ASIC_REV.csr;
+ io->io_sys_rev = io7->csrs->IO_SYS_REV.csr;
+ io->io7_uph = io7->csrs->IO7_UPH.csr;
+ io->hpi_ctl = io7->csrs->HPI_CTL.csr;
+ io->crd_ctl = io7->csrs->CRD_CTL.csr;
+ io->hei_ctl = io7->csrs->HEI_CTL.csr;
+ io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr;
+ io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr;
+ io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr;
+ io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr;
+ io->po7_err_pkt0 = io7->csrs->PO7_ERR_PKT[0].csr;
+ io->po7_err_pkt1 = io7->csrs->PO7_ERR_PKT[1].csr;
+
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ io7_ioport_csrs *csrs = io7->ports[i].csrs;
+
+ if (!io7->ports[i].enabled)
+ continue;
+
+ io->ports[i].pox_err_sum = csrs->POx_ERR_SUM.csr;
+ io->ports[i].pox_tlb_err = csrs->POx_TLB_ERR.csr;
+ io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr;
+ io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr;
+ io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr;
+ io->ports[i].pox_mult_err = csrs->POx_MULT_ERR.csr;
+ io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr;
+ io->ports[i].pox_dm_dest = csrs->POx_DM_DEST.csr;
+ io->ports[i].pox_dm_size = csrs->POx_DM_SIZE.csr;
+ io->ports[i].pox_dm_ctrl = csrs->POx_DM_CTRL.csr;
+
+ /*
+ * Ack this port's errors, if any. POx_ERR_SUM must be last.
+ *
+ * Most of the error registers get cleared and unlocked when
+ * the associated bits in POx_ERR_SUM are cleared (by writing
+ * 1). POx_TLB_ERR is an exception and must be explicitly
+ * cleared.
+ */
+ csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err;
+ csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum;
+ mb();
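+		/* Dummy read to force the write to complete at the IO7. */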
+ csrs->POx_ERR_SUM.csr;
+ }
+
+ /*
+ * Ack any port 7 error(s).
+ */
+ io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum;
+ mb();
+ io7->csrs->PO7_ERROR_SUM.csr;
+
+ /*
+ * Correct the io7_pid.
+ */
+ lf_subpackets->io_pid = io7->pe;
+
+ return io;
+}
+
+static int
+marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct ev7_pal_io_subpacket *io = lf_subpackets->io;
+ int i;
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define MARVEL_IO_ERR_VALID(x) ((x) & (1UL << 63))
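+/* Bit 63 is the ERR_VALID bit of both PO7_ERROR_SUM and POx_ERR_SUM
+   (IO7__PO7_ERRSUM__ERR_VALID and IO7__POX_ERRSUM__ERR_VALID above). */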
+
+ if (!lf_subpackets->logout || !lf_subpackets->io)
+ return status;
+
+ /*
+ * The PALcode only builds an IO subpacket if there is a
+ * locally connected IO7. In the cases of
+ * 1) a uniprocessor kernel
+ * 2) an mp kernel before the local secondary has called in
+ * error interrupts are all directed to the primary processor.
+	 * In that case, we may not have an IO subpacket at all and, even
+	 * if we do, it may not be the right one.
+ *
+ * If the RBOX indicates an I/O error interrupt, make sure we have
+ * the correct IO7 information. If we don't have an IO subpacket
+ * or it's the wrong one, try to find the right one.
+ *
+ * RBOX I/O error interrupts are indicated by RBOX_INT<29> and
+ * RBOX_INT<10>.
+ */
+	if (!(lf_subpackets->io->po7_error_sum & (1UL << 32)) &&
+	    !((lf_subpackets->io->po7_error_sum |
+	       lf_subpackets->io->ports[0].pox_err_sum |
+	       lf_subpackets->io->ports[1].pox_err_sum |
+	       lf_subpackets->io->ports[2].pox_err_sum |
+	       lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) {
+		/*
+		 * Either we have no IO subpacket or no error is
+		 * indicated in the one we do have. Try to find the
+		 * one with the error.
+		 */
+ if (!marvel_find_io7_with_error(lf_subpackets))
+ return status;
+ }
+
+ /*
+ * We have an IO7 indicating an error - we're going to report it
+ */
+ status = MCHK_DISPOSITION_REPORT;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+
+ if (!print)
+ return status;
+
+ printk("%s*Error occurred on IO7 at PID %u\n",
+ err_print_prefix, lf_subpackets->io_pid);
+
+ /*
+ * Check port 7 first
+ */
+ if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) {
+ marvel_print_po7_err_sum(io);
+
+#if 0
+ printk("%s PORT 7 ERROR:\n"
+ "%s PO7_ERROR_SUM: %016lx\n"
+ "%s PO7_UNCRR_SYM: %016lx\n"
+ "%s PO7_CRRCT_SYM: %016lx\n"
+ "%s PO7_UGBGE_SYM: %016lx\n"
+ "%s PO7_ERR_PKT0: %016lx\n"
+ "%s PO7_ERR_PKT1: %016lx\n",
+ err_print_prefix,
+ err_print_prefix, io->po7_error_sum,
+ err_print_prefix, io->po7_uncrr_sym,
+ err_print_prefix, io->po7_crrct_sym,
+ err_print_prefix, io->po7_ugbge_sym,
+ err_print_prefix, io->po7_err_pkt0,
+ err_print_prefix, io->po7_err_pkt1);
+#endif
+ }
+
+ /*
+ * Then loop through the ports
+ */
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum))
+ continue;
+
+ printk("%s PID %u PORT %d POx_ERR_SUM: %016lx\n",
+ err_print_prefix,
+ lf_subpackets->io_pid, i, io->ports[i].pox_err_sum);
+ marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]);
+
+ printk("%s [ POx_FIRST_ERR: %016lx ]\n",
+ err_print_prefix, io->ports[i].pox_first_err);
+ marvel_print_pox_err(io->ports[i].pox_first_err,
+ &io->ports[i]);
+
+ }
+
+
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ /*
+ * I/O error?
+ */
+#define EV7__RBOX_INT__IO_ERROR__MASK 0x20000400ul
+	if (lf_subpackets->logout &&
+	    (lf_subpackets->logout->rbox_int & EV7__RBOX_INT__IO_ERROR__MASK))
+ status = marvel_process_io_error(lf_subpackets, print);
+
+ /*
+ * Probing behind PCI-X bridges can cause machine checks on
+ * Marvel when the probe is handled by the bridge as a split
+ * completion transaction. The symptom is an ERROR_RESPONSE
+ * to a CONFIG address. Since these errors will happen in
+ * normal operation, dismiss them.
+ *
+ * Dismiss if:
+	 *   C_STAT      = 0x14             (Error Response)
+ * C_STS<3> = 0 (C_ADDR valid)
+ * C_ADDR<42> = 1 (I/O)
+ * C_ADDR<31:22> = 111110xxb (PCI Config space)
+ */
+ if (lf_subpackets->ev7 &&
+ (lf_subpackets->ev7->c_stat == 0x14) &&
+ !(lf_subpackets->ev7->c_sts & 0x8) &&
+ ((lf_subpackets->ev7->c_addr & 0x400ff000000ul)
+ == 0x400fe000000ul))
+ status = MCHK_DISPOSITION_DISMISS;
+
+ return status;
+}
+
+void
+marvel_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
+ int (*process_frame)(struct ev7_lf_subpackets *, int) = NULL;
+ struct ev7_lf_subpackets subpacket_collection = { NULL, };
+ struct ev7_pal_io_subpacket scratch_io_packet = { 0, };
+ struct ev7_lf_subpackets *lf_subpackets = NULL;
+ int disposition = MCHK_DISPOSITION_UNKNOWN_ERROR;
+ char *saved_err_prefix = err_print_prefix;
+ char *error_type = NULL;
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ switch(vector) {
+ case SCB_Q_SYSEVENT:
+ process_frame = marvel_process_680_frame;
+ error_type = "System Event";
+ break;
+
+ case SCB_Q_SYSMCHK:
+ process_frame = marvel_process_logout_frame;
+ error_type = "System Uncorrectable Error";
+ break;
+
+ case SCB_Q_SYSERR:
+ process_frame = marvel_process_logout_frame;
+ error_type = "System Correctable Error";
+ break;
+
+ default:
+ /* Don't know it - pass it up. */
+ ev7_machine_check(vector, la_ptr, regs);
+ return;
+ }
+
+ /*
+	 * A system event or error has occurred; handle it here.
+ *
+ * Any errors in the logout frame have already been cleared by the
+ * PALcode, so just parse it.
+ */
+ err_print_prefix = KERN_CRIT;
+
+ /*
+ * Parse the logout frame without printing first. If the only error(s)
+ * found are classified as "dismissable", then just dismiss them and
+ * don't print any message
+ */
+ lf_subpackets =
+ ev7_collect_logout_frame_subpackets(el_ptr,
+ &subpacket_collection);
+ if (process_frame && lf_subpackets && lf_subpackets->logout) {
+ /*
+ * We might not have the correct (or any) I/O subpacket.
+ * [ See marvel_process_io_error() for explanation. ]
+ * If we don't have one, point the io subpacket in
+ * lf_subpackets at scratch_io_packet so that
+ * marvel_find_io7_with_error() will have someplace to
+ * store the info.
+ */
+ if (!lf_subpackets->io)
+ lf_subpackets->io = &scratch_io_packet;
+
+ /*
+ * Default io_pid to the processor reporting the error
+ * [this will get changed in marvel_find_io7_with_error()
+ * if a different one is needed]
+ */
+ lf_subpackets->io_pid = lf_subpackets->logout->whami;
+
+ /*
+ * Evaluate the frames.
+ */
+ disposition = process_frame(lf_subpackets, 0);
+ }
+ switch(disposition) {
+ case MCHK_DISPOSITION_DISMISS:
+ /* Nothing to do. */
+ break;
+
+ case MCHK_DISPOSITION_REPORT:
+ /* Recognized error, report it. */
+ printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
+ err_print_prefix, error_type,
+ (unsigned int)vector, (int)smp_processor_id());
+ el_print_timestamp(&lf_subpackets->logout->timestamp);
+ process_frame(lf_subpackets, 1);
+ break;
+
+ default:
+ /* Unknown - dump the annotated subpackets. */
+ printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
+ err_print_prefix, error_type,
+ (unsigned int)vector, (int)smp_processor_id());
+ el_process_subpacket(el_ptr);
+ break;
+
+ }
+
+ err_print_prefix = saved_err_prefix;
+
+ /* Release the logout frame. */
+ wrmces(0x7);
+ mb();
+}
+
+void
+marvel_register_error_handlers(void)
+{
+ ev7_register_error_handlers();
+}
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
new file mode 100644
index 000000000000..7e6720d45f02
--- /dev/null
+++ b/arch/alpha/kernel/err_titan.c
@@ -0,0 +1,756 @@
+/*
+ * linux/arch/alpha/kernel/err_titan.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting TITAN systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/core_titan.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev6.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+
+static int
+titan_parse_c_misc(u64 c_misc, int print)
+{
+#ifdef CONFIG_VERBOSE_MCHECK
+ char *src;
+ int nxs = 0;
+#endif
+ int status = MCHK_DISPOSITION_REPORT;
+
+#define TITAN__CCHIP_MISC__NXM (1UL << 28)
+#define TITAN__CCHIP_MISC__NXS__S (29)
+#define TITAN__CCHIP_MISC__NXS__M (0x7)
+
+ if (!(c_misc & TITAN__CCHIP_MISC__NXM))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS);
+ switch(nxs) {
+ case 0: /* CPU 0 */
+ case 1: /* CPU 1 */
+ case 2: /* CPU 2 */
+ case 3: /* CPU 3 */
+ src = "CPU";
+		/* nxs is already the CPU number */
+ break;
+ case 4: /* Pchip 0 */
+ case 5: /* Pchip 1 */
+ src = "Pchip";
+ nxs -= 4;
+ break;
+ default:/* reserved */
+ src = "Unknown, NXS =";
+		/* leave nxs untouched */
+ break;
+ }
+
+ printk("%s Non-existent memory access from: %s %d\n",
+ err_print_prefix, src, nxs);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_serror(int which, u64 serror, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
+ char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
+#define TITAN__PCHIP_SERROR__UECC (1UL << 1)
+#define TITAN__PCHIP_SERROR__CRE (1UL << 2)
+#define TITAN__PCHIP_SERROR__NXIO (1UL << 3)
+#define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4)
+#define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \
+ TITAN__PCHIP_SERROR__CRE)
+#define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \
+ TITAN__PCHIP_SERROR__UECC | \
+ TITAN__PCHIP_SERROR__CRE | \
+ TITAN__PCHIP_SERROR__NXIO | \
+ TITAN__PCHIP_SERROR__LOST_CRE)
+#define TITAN__PCHIP_SERROR__SRC__S (52)
+#define TITAN__PCHIP_SERROR__SRC__M (0x3)
+#define TITAN__PCHIP_SERROR__CMD__S (54)
+#define TITAN__PCHIP_SERROR__CMD__M (0x3)
+#define TITAN__PCHIP_SERROR__SYN__S (56)
+#define TITAN__PCHIP_SERROR__SYN__M (0xff)
+#define TITAN__PCHIP_SERROR__ADDR__S (15)
+#define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL)
+
+ if (!(serror & TITAN__PCHIP_SERROR__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ printk("%s PChip %d SERROR: %016lx\n",
+ err_print_prefix, which, serror);
+ if (serror & TITAN__PCHIP_SERROR__ECCMASK) {
+ printk("%s %sorrectable ECC Error:\n"
+ " Source: %-6s Command: %-8s Syndrome: 0x%08x\n"
+ " Address: 0x%lx\n",
+ err_print_prefix,
+ (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C",
+ serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)],
+ serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)],
+ (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN),
+ EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR));
+ }
+ if (serror & TITAN__PCHIP_SERROR__NXIO)
+ printk("%s Non Existent I/O Error\n", err_print_prefix);
+ if (serror & TITAN__PCHIP_SERROR__LOST_UECC)
+ printk("%s Lost Uncorrectable ECC Error\n",
+ err_print_prefix);
+ if (serror & TITAN__PCHIP_SERROR__LOST_CRE)
+ printk("%s Lost Correctable ECC Error\n", err_print_prefix);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_perror(int which, int port, u64 perror, int print)
+{
+ int cmd;
+ unsigned long addr;
+ int status = MCHK_DISPOSITION_REPORT;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
+ "I/O Read", "I/O Write",
+ "Reserved", "Reserved",
+ "Memory Read", "Memory Write",
+ "Reserved", "Reserved",
+ "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address Cycle",
+ "Memory Read Line","Memory Write and Invalidate"
+ };
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define TITAN__PCHIP_PERROR__LOST (1UL << 0)
+#define TITAN__PCHIP_PERROR__SERR (1UL << 1)
+#define TITAN__PCHIP_PERROR__PERR (1UL << 2)
+#define TITAN__PCHIP_PERROR__DCRTO (1UL << 3)
+#define TITAN__PCHIP_PERROR__SGE (1UL << 4)
+#define TITAN__PCHIP_PERROR__APE (1UL << 5)
+#define TITAN__PCHIP_PERROR__TA (1UL << 6)
+#define TITAN__PCHIP_PERROR__DPE (1UL << 7)
+#define TITAN__PCHIP_PERROR__NDS (1UL << 8)
+#define TITAN__PCHIP_PERROR__IPTPR (1UL << 9)
+#define TITAN__PCHIP_PERROR__IPTPW (1UL << 10)
+#define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \
+ TITAN__PCHIP_PERROR__SERR | \
+ TITAN__PCHIP_PERROR__PERR | \
+ TITAN__PCHIP_PERROR__DCRTO | \
+ TITAN__PCHIP_PERROR__SGE | \
+ TITAN__PCHIP_PERROR__APE | \
+ TITAN__PCHIP_PERROR__TA | \
+ TITAN__PCHIP_PERROR__DPE | \
+ TITAN__PCHIP_PERROR__NDS | \
+ TITAN__PCHIP_PERROR__IPTPR | \
+ TITAN__PCHIP_PERROR__IPTPW)
+#define TITAN__PCHIP_PERROR__DAC (1UL << 47)
+#define TITAN__PCHIP_PERROR__MWIN (1UL << 48)
+#define TITAN__PCHIP_PERROR__CMD__S (52)
+#define TITAN__PCHIP_PERROR__CMD__M (0x0f)
+#define TITAN__PCHIP_PERROR__ADDR__S (14)
+#define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful)
+
+ if (!(perror & TITAN__PCHIP_PERROR__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD);
+ addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2;
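+	/* ADDR is the PCI address shifted right 2; the "<< 2" above
+	   restores the longword-aligned address. */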
+
+ /*
+ * Initializing the BIOS on a video card on a bus without
+ * a south bridge (subtractive decode agent) can result in
+ * master aborts as the BIOS probes the capabilities of the
+ * card. XFree86 does such initialization. If the error
+ * is a master abort (No DevSel as PCI Master) and the command
+ * is an I/O read or write below the address where we start
+ * assigning PCI I/O spaces (SRM uses 0x1000), then mark the
+ * error as dismissable so starting XFree86 doesn't result
+ * in a series of uncorrectable errors being reported. Also
+ * dismiss master aborts to VGA frame buffer space
+ * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000)
+ * for the same reason.
+ *
+ * Also mark the error dismissible if it looks like the right
+ * error but only the Lost bit is set. Since the BIOS initialization
+ * can cause multiple master aborts and the error interrupt can
+ * be handled on a different CPU than the BIOS code is run on,
+ * it is possible for a second master abort to occur between the
+ * time the PALcode reads PERROR and the time it writes PERROR
+ * to acknowledge the error. If this timing happens, a second
+ * error will be signalled after the first, and if no additional
+ * errors occur, will look like a Lost error with no additional
+ * errors on the same transaction as the previous error.
+ */
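+	/*
+	 * In the checks below, (cmd & 0xE) == 2 matches PCI commands 2 and 3
+	 * (I/O Read/Write) and (cmd & 0xE) == 6 matches commands 6 and 7
+	 * (Memory Read/Write); see perror_cmd[] above.
+	 */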
+ if (((perror & TITAN__PCHIP_PERROR__NDS) ||
+ ((perror & TITAN__PCHIP_PERROR__ERRMASK) ==
+ TITAN__PCHIP_PERROR__LOST)) &&
+ ((((cmd & 0xE) == 2) && (addr < 0x1000)) ||
+ (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) {
+ status = MCHK_DISPOSITION_DISMISS;
+ }
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ printk("%s PChip %d %cPERROR: %016lx\n",
+ err_print_prefix, which,
+ port ? 'A' : 'G', perror);
+ if (perror & TITAN__PCHIP_PERROR__IPTPW)
+ printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__IPTPR)
+ printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__NDS)
+ printk("%s No DEVSEL as PCI Master [Master Abort]\n",
+ err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__DPE)
+ printk("%s Data Parity Error\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__TA)
+ printk("%s Target Abort\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__APE)
+ printk("%s Address Parity Error\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__SGE)
+ printk("%s Scatter-Gather Error, Invalid PTE\n",
+ err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__DCRTO)
+ printk("%s Delayed-Completion Retry Timeout\n",
+ err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__PERR)
+ printk("%s PERR Asserted\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__SERR)
+ printk("%s SERR Asserted\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__LOST)
+ printk("%s Lost Error\n", err_print_prefix);
+ printk("%s Command: 0x%x - %s\n"
+ " Address: 0x%lx\n",
+ err_print_prefix,
+ cmd, perror_cmd[cmd],
+ addr);
+ if (perror & TITAN__PCHIP_PERROR__DAC)
+ printk("%s Dual Address Cycle\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__MWIN)
+ printk("%s Hit in Monster Window\n", err_print_prefix);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_agperror(int which, u64 agperror, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+#ifdef CONFIG_VERBOSE_MCHECK
+ int cmd, len;
+ unsigned long addr;
+
+ char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
+ "Write (low-priority)",
+ "Write (high-priority)",
+ "Reserved", "Reserved",
+ "Flush", "Fence"
+ };
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define TITAN__PCHIP_AGPERROR__LOST (1UL << 0)
+#define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1)
+#define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2)
+#define TITAN__PCHIP_AGPERROR__RESCMD (1UL << 3)
+#define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4)
+#define TITAN__PCHIP_AGPERROR__PTP (1UL << 5)
+#define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6)
+#define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \
+ TITAN__PCHIP_AGPERROR__LPQFULL | \
+ TITAN__PCHIP_AGPERROR__HPQFULL | \
+ TITAN__PCHIP_AGPERROR__RESCMD | \
+ TITAN__PCHIP_AGPERROR__IPTE | \
+ TITAN__PCHIP_AGPERROR__PTP | \
+ TITAN__PCHIP_AGPERROR__NOWINDOW)
+#define TITAN__PCHIP_AGPERROR__DAC (1UL << 48)
+#define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49)
+#define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59)
+#define TITAN__PCHIP_AGPERROR__CMD__S (50)
+#define TITAN__PCHIP_AGPERROR__CMD__M (0x07)
+#define TITAN__PCHIP_AGPERROR__ADDR__S (15)
+#define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL)
+#define TITAN__PCHIP_AGPERROR__LEN__S (53)
+#define TITAN__PCHIP_AGPERROR__LEN__M (0x3f)
+
+ if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD);
+ addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3;
+ len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN);
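+	/* ADDR is stored shifted right 3; the "<< 3" above restores the
+	   quadword-aligned AGP address (LEN counts quadwords). */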
+
+ printk("%s PChip %d AGPERROR: %016lx\n", err_print_prefix,
+ which, agperror);
+ if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW)
+ printk("%s No Window\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__PTP)
+ printk("%s Peer-to-Peer set\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__IPTE)
+ printk("%s Invalid PTE\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__RESCMD)
+ printk("%s Reserved Command\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL)
+ printk("%s HP Transaction Received while Queue Full\n",
+ err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL)
+ printk("%s LP Transaction Received while Queue Full\n",
+ err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__LOST)
+ printk("%s Lost Error\n", err_print_prefix);
+ printk("%s Command: 0x%x - %s, %d Quadwords%s\n"
+ " Address: 0x%lx\n",
+ err_print_prefix, cmd, agperror_cmd[cmd], len,
+ (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "",
+ addr);
+ if (agperror & TITAN__PCHIP_AGPERROR__DAC)
+ printk("%s Dual Address Cycle\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__MWIN)
+ printk("%s Hit in Monster Window\n", err_print_prefix);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_chip(int which, u64 serror, u64 gperror,
+ u64 aperror, u64 agperror, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+ status |= titan_parse_p_serror(which, serror, print);
+ status |= titan_parse_p_perror(which, 0, gperror, print);
+ status |= titan_parse_p_perror(which, 1, aperror, print);
+ status |= titan_parse_p_agperror(which, agperror, print);
+ return status;
+}
+
+int
+titan_process_logout_frame(struct el_common *mchk_header, int print)
+{
+ struct el_TITAN_sysdata_mcheck *tmchk =
+ (struct el_TITAN_sysdata_mcheck *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset);
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ status |= titan_parse_c_misc(tmchk->c_misc, print);
+ status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror,
+ tmchk->p0_aperror, tmchk->p0_agperror,
+ print);
+ status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror,
+ tmchk->p1_aperror, tmchk->p1_agperror,
+ print);
+
+ return status;
+}
+
+void
+titan_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+ struct el_TITAN_sysdata_mcheck *tmchk =
+ (struct el_TITAN_sysdata_mcheck *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset);
+ u64 irqmask;
+
+ /*
+ * Mask of Titan interrupt sources which are reported as machine checks
+ *
+ * 63 - CChip Error
+ * 62 - PChip 0 H_Error
+ * 61 - PChip 1 H_Error
+ * 60 - PChip 0 C_Error
+ * 59 - PChip 1 C_Error
+ */
+#define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL
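+/* TITAN_MCHECK_INTERRUPT_MASK covers exactly bits <63:59>, one bit per
+   source listed above. */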
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ /*
+ * Only handle system errors here
+ */
+ if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) {
+ ev6_machine_check(vector, la_ptr, regs);
+ return;
+ }
+
+ /*
+ * It's a system error, handle it here
+ *
+ * The PALcode has already cleared the error, so just parse it
+ */
+
+ /*
+ * Parse the logout frame without printing first. If the only error(s)
+ * found are classified as "dismissable", then just dismiss them and
+ * don't print any message
+ */
+ if (titan_process_logout_frame(mchk_header, 0) !=
+ MCHK_DISPOSITION_DISMISS) {
+ char *saved_err_prefix = err_print_prefix;
+ err_print_prefix = KERN_CRIT;
+
+ /*
+ * Either a nondismissable error was detected or no
+ * recognized error was detected in the logout frame
+ * -- report the error in either case
+ */
+ printk("%s"
+ "*System %s Error (Vector 0x%x) reported on CPU %d:\n",
+ err_print_prefix,
+ (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable",
+ (unsigned int)vector, (int)smp_processor_id());
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ titan_process_logout_frame(mchk_header, alpha_verbose_mcheck);
+ if (alpha_verbose_mcheck)
+ dik_show_regs(regs, NULL);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ err_print_prefix = saved_err_prefix;
+
+ /*
+ * Convert any pending interrupts which report as system
+ * machine checks to interrupts
+ */
+ irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK;
+ titan_dispatch_irqs(irqmask, regs);
+ }
+
+
+ /*
+ * Release the logout frame
+ */
+ wrmces(0x7);
+ mb();
+}
+
+/*
+ * Subpacket Annotations
+ */
+static char *el_titan_pchip0_extended_annotation[] = {
+ "Subpacket Header", "P0_SCTL", "P0_SERREN",
+ "P0_APCTL", "P0_APERREN", "P0_AGPERREN",
+ "P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1",
+ "P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0",
+ "P0_AWSM1", "P0_AWSM2", "P0_AWSM3",
+ "P0_ATBA0", "P0_ATBA1", "P0_ATBA2",
+ "P0_ATBA3", "P0_GPCTL", "P0_GPERREN",
+ "P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1",
+ "P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0",
+ "P0_GWSM1", "P0_GWSM2", "P0_GWSM3",
+ "P0_GTBA0", "P0_GTBA1", "P0_GTBA2",
+ "P0_GTBA3", NULL
+};
+static char *el_titan_pchip1_extended_annotation[] = {
+ "Subpacket Header", "P1_SCTL", "P1_SERREN",
+ "P1_APCTL", "P1_APERREN", "P1_AGPERREN",
+ "P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1",
+ "P1_AWSBA2", "P1_AWSBA3", "P1_AWSM0",
+ "P1_AWSM1", "P1_AWSM2", "P1_AWSM3",
+ "P1_ATBA0", "P1_ATBA1", "P1_ATBA2",
+ "P1_ATBA3", "P1_GPCTL", "P1_GPERREN",
+ "P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1",
+ "P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0",
+ "P1_GWSM1", "P1_GWSM2", "P1_GWSM3",
+ "P1_GTBA0", "P1_GTBA1", "P1_GTBA2",
+ "P1_GTBA3", NULL
+};
+static char *el_titan_memory_extended_annotation[] = {
+ "Subpacket Header", "AAR0", "AAR1",
+ "AAR2", "AAR3", "P0_SCTL",
+ "P0_GPCTL", "P0_APCTL", "P1_SCTL",
+ "P1_GPCTL", "P1_SCTL", NULL
+};
+
+static struct el_subpacket_annotation el_titan_annotations[] = {
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED,
+ 1,
+ "Titan PChip 0 Extended Frame",
+ el_titan_pchip0_extended_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED,
+ 1,
+ "Titan PChip 1 Extended Frame",
+ el_titan_pchip1_extended_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED,
+ 1,
+ "Titan Memory Extended Frame",
+ el_titan_memory_extended_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__TERMINATION__TERMINATION,
+ 1,
+ "Termination Subpacket",
+ NULL)
+};
+
+static struct el_subpacket *
+el_process_regatta_subpacket(struct el_subpacket *header)
+{
+ int status;
+
+ if (header->class != EL_CLASS__REGATTA_FAMILY) {
+ printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ switch(header->type) {
+ case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME:
+ case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME:
+ case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME:
+ case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT:
+ case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT:
+ printk("%s ** Occurred on CPU %d:\n",
+ err_print_prefix,
+ (int)header->by_type.regatta_frame.cpuid);
+ status = privateer_process_logout_frame((struct el_common *)
+ header->by_type.regatta_frame.data_start, 1);
+ break;
+ default:
+ printk("%s ** REGATTA TYPE %d SUBPACKET\n",
+ err_print_prefix, header->type);
+ el_annotate_subpacket(header);
+ break;
+ }
+
+
+ return (struct el_subpacket *)((unsigned long)header + header->length);
+}
+
+static struct el_subpacket_handler titan_subpacket_handler =
+ SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY,
+ el_process_regatta_subpacket);
+
+void
+titan_register_error_handlers(void)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++)
+ cdl_register_subpacket_annotation(&el_titan_annotations[i]);
+
+ cdl_register_subpacket_handler(&titan_subpacket_handler);
+
+ ev6_register_error_handlers();
+}
+
+
+/*
+ * Privateer
+ */
+
+static int
+privateer_process_680_frame(struct el_common *mchk_header, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct el_PRIVATEER_envdata_mcheck *emchk =
+ (struct el_PRIVATEER_envdata_mcheck *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset);
+
+	/* TODO - categorize errors; for now, report no error */
+
+ if (!print)
+ return status;
+
+ /* TODO - decode instead of just dumping... */
+ printk("%s Summary Flags: %016lx\n"
+ " CChip DIRx: %016lx\n"
+ " System Management IR: %016lx\n"
+ " CPU IR: %016lx\n"
+ " Power Supply IR: %016lx\n"
+ " LM78 Fault Status: %016lx\n"
+ " System Doors: %016lx\n"
+ " Temperature Warning: %016lx\n"
+ " Fan Control: %016lx\n"
+ " Fatal Power Down Code: %016lx\n",
+ err_print_prefix,
+ emchk->summary,
+ emchk->c_dirx,
+ emchk->smir,
+ emchk->cpuir,
+ emchk->psir,
+ emchk->fault,
+ emchk->sys_doors,
+ emchk->temp_warn,
+ emchk->fan_ctrl,
+ emchk->code);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+int
+privateer_process_logout_frame(struct el_common *mchk_header, int print)
+{
+ struct el_common_EV6_mcheck *ev6mchk =
+ (struct el_common_EV6_mcheck *)mchk_header;
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ /*
+ * Machine check codes
+ */
+#define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */
+#define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */
+#define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */
+#define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */
+#define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */
+#define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */
+#define PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */
+#define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */
+#define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */
+#define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */
+
+ switch(ev6mchk->MCHK_Code) {
+ /*
+ * Vector 630 - Processor, Correctable
+ */
+ case PRIVATEER_MCHK__CORR_ECC:
+ case PRIVATEER_MCHK__DC_TAG_PERR:
+ /*
+ * Fall through to vector 670 for processing...
+ */
+ /*
+ * Vector 670 - Processor, Uncorrectable
+ */
+ case PRIVATEER_MCHK__PAL_BUGCHECK:
+ case PRIVATEER_MCHK__OS_BUGCHECK:
+ case PRIVATEER_MCHK__PROC_HRD_ERR:
+ case PRIVATEER_MCHK__ISTREAM_CMOV_PRX:
+ case PRIVATEER_MCHK__ISTREAM_CMOV_FLT:
+ status |= ev6_process_logout_frame(mchk_header, print);
+ break;
+
+ /*
+ * Vector 620 - System, Correctable
+ */
+ case PRIVATEER_MCHK__SYS_CORR_ERR:
+ /*
+ * Fall through to vector 660 for processing...
+ */
+ /*
+ * Vector 660 - System, Uncorrectable
+ */
+ case PRIVATEER_MCHK__SYS_HRD_ERR:
+ status |= titan_process_logout_frame(mchk_header, print);
+ break;
+
+ /*
+ * Vector 680 - System, Environmental
+ */
+	case PRIVATEER_MCHK__SYS_ENVIRON:
+ status |= privateer_process_680_frame(mchk_header, print);
+ break;
+
+ /*
+ * Unknown
+ */
+ default:
+ status |= MCHK_DISPOSITION_REPORT;
+ if (print) {
+ printk("%s** Unknown Error, frame follows\n",
+ err_print_prefix);
+ mchk_dump_logout_frame(mchk_header);
+ }
+
+ }
+
+ return status;
+}
+
+void
+privateer_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+ struct el_TITAN_sysdata_mcheck *tmchk =
+ (struct el_TITAN_sysdata_mcheck *)
+ (la_ptr + mchk_header->sys_offset);
+ u64 irqmask;
+ char *saved_err_prefix = err_print_prefix;
+
+#define PRIVATEER_680_INTERRUPT_MASK (0xE00UL)
+#define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL)
+
+ /*
+ * Sync the processor.
+ */
+ mb();
+ draina();
+
+ /*
+ * Only handle system events here.
+ */
+ if (vector != SCB_Q_SYSEVENT)
+ return titan_machine_check(vector, la_ptr, regs);
+
+ /*
+ * Report the event - System Events should be reported even if no
+ * error is indicated since the event could indicate the return
+ * to normal status.
+ */
+ err_print_prefix = KERN_CRIT;
+ printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n",
+ err_print_prefix,
+ (unsigned int)vector, (int)smp_processor_id());
+ privateer_process_680_frame(mchk_header, 1);
+ err_print_prefix = saved_err_prefix;
+
+ /*
+ * Convert any pending interrupts which report as 680 machine
+ * checks to interrupts.
+ */
+ irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK;
+
+ /*
+ * Dispatch the interrupt(s).
+ */
+ titan_dispatch_irqs(irqmask, regs);
+
+ /*
+ * Release the logout frame.
+ */
+ wrmces(0x7);
+ mb();
+}
diff --git a/arch/alpha/kernel/es1888.c b/arch/alpha/kernel/es1888.c
new file mode 100644
index 000000000000..d584c85fea7a
--- /dev/null
+++ b/arch/alpha/kernel/es1888.c
@@ -0,0 +1,49 @@
+/*
+ * linux/arch/alpha/kernel/es1888.c
+ *
+ * Init the built-in ES1888 sound chip (SB16 compatible)
+ */
+
+#include <linux/init.h>
+#include <asm/io.h>
+#include "proto.h"
+
+void __init
+es1888_init(void)
+{
+ /* Sequence of IO reads to init the audio controller */
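+	/* This read pattern appears to be an address-select "knock"
+	   sequence: the chip watches for it, and the final read at
+	   0x220 below latches the base port. */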
+ inb(0x0229);
+ inb(0x0229);
+ inb(0x0229);
+ inb(0x022b);
+ inb(0x0229);
+ inb(0x022b);
+ inb(0x0229);
+ inb(0x0229);
+ inb(0x022b);
+ inb(0x0229);
+ inb(0x0220); /* This sets the base address to 0x220 */
+
+ /* Sequence to set DMA channels */
+ outb(0x01, 0x0226); /* reset */
+ inb(0x0226); /* pause */
+ outb(0x00, 0x0226); /* release reset */
+	while (!(inb(0x022e) & 0x80)) /* wait for bit 7 to assert */
+ continue;
+ inb(0x022a); /* pause */
+ outb(0xc6, 0x022c); /* enable extended mode */
+ inb(0x022a); /* pause, also forces the write */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0xb1, 0x022c); /* setup for write to Interrupt CR */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0x14, 0x022c); /* set IRQ 5 */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0xb2, 0x022c); /* setup for write to DMA CR */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0x18, 0x022c); /* set DMA channel 1 */
+ inb(0x022c); /* force the write */
+}
diff --git a/arch/alpha/kernel/gct.c b/arch/alpha/kernel/gct.c
new file mode 100644
index 000000000000..8827687b9f89
--- /dev/null
+++ b/arch/alpha/kernel/gct.c
@@ -0,0 +1,48 @@
+/*
+ * linux/arch/alpha/kernel/gct.c
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <asm/hwrpb.h>
+#include <asm/gct.h>
+
+int
+gct6_find_nodes(gct6_node *node, gct6_search_struct *search)
+{
+ gct6_search_struct *wanted;
+ int status = 0;
+
+ /* First check the magic number. */
+ if (node->magic != GCT_NODE_MAGIC) {
+ printk(KERN_ERR "GCT Node MAGIC incorrect - GCT invalid\n");
+ return -EINVAL;
+ }
+
+ /* Check against the search struct. */
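+	/* An entry with both type and subtype zero terminates the search
+	   list; the bitwise OR in the loop condition tests exactly that. */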
+ for (wanted = search;
+ wanted && (wanted->type | wanted->subtype);
+ wanted++) {
+ if (node->type != wanted->type)
+ continue;
+ if (node->subtype != wanted->subtype)
+ continue;
+
+ /* Found it -- call out. */
+ if (wanted->callout)
+ wanted->callout(node);
+ }
+
+ /* Now walk the tree, siblings first. */
+ if (node->next)
+ status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search);
+
+ /* Then the children. */
+ if (node->child)
+ status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search);
+
+ return status;
+}
diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S
new file mode 100644
index 000000000000..4ca2e404708a
--- /dev/null
+++ b/arch/alpha/kernel/head.S
@@ -0,0 +1,99 @@
+/*
+ * linux/arch/alpha/kernel/head.S
+ *
+ * Initial boot stuff. At this point, the bootloader has already
+ * switched into OSF/1 PAL-code, and loaded us at the correct address
+ * (START_ADDR). So there isn't much left for us to do: just set up
+ * the kernel global pointer and jump to the kernel entry-point.
+ */
+
+#include <linux/config.h>
+#include <asm/system.h>
+#include <asm/asm_offsets.h>
+
+.globl swapper_pg_dir
+.globl _stext
+swapper_pg_dir=SWAPPER_PGD
+
+ .set noreorder
+ .globl __start
+ .ent __start
+_stext:
+__start:
+ .prologue 0
+ br $27,1f
+1: ldgp $29,0($27)
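+	/* The branch above wrote the address of label 1 (our PV) into $27;
+	   ldgp derives the kernel global pointer from it. */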
+	/* We need to get current_thread_info loaded up... */
+ lda $8,init_thread_union
+ /* ... and find our stack ... */
+ lda $30,0x4000 - SIZEOF_PT_REGS($8)
+ /* ... and then we can start the kernel. */
+ jsr $26,start_kernel
+ call_pal PAL_halt
+ .end __start
+
+#ifdef CONFIG_SMP
+ .align 3
+ .globl __smp_callin
+ .ent __smp_callin
+ /* On entry here from SRM console, the HWPCB of the per-cpu
+ slot for this processor has been loaded. We've arranged
+ for the UNIQUE value for this process to contain the PCBB
+ of the target idle task. */
+__smp_callin:
+ .prologue 1
+ ldgp $29,0($27) # First order of business, load the GP.
+
+ call_pal PAL_rduniq # Grab the target PCBB.
+ mov $0,$16 # Install it.
+ call_pal PAL_swpctx
+
+ lda $8,0x3fff # Find "current".
+ bic $30,$8,$8
+
+ jsr $26,smp_callin
+ call_pal PAL_halt
+ .end __smp_callin
+#endif /* CONFIG_SMP */
+
+ #
+ # The following two functions are needed for supporting SRM PALcode
+ # on the PC164 (at least), since that PALcode manages the interrupt
+ # masking, and we cannot duplicate the effort without causing problems
+ #
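+ # Each stub copies its argument into $17 and loads the CSERVE
+ # sub-function code into $16 (52 for cserve_ena, 53 for cserve_dis).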
+
+ .align 3
+ .globl cserve_ena
+ .ent cserve_ena
+cserve_ena:
+ .prologue 0
+ bis $16,$16,$17
+ lda $16,52($31)
+ call_pal PAL_cserve
+ ret ($26)
+ .end cserve_ena
+
+ .align 3
+ .globl cserve_dis
+ .ent cserve_dis
+cserve_dis:
+ .prologue 0
+ bis $16,$16,$17
+ lda $16,53($31)
+ call_pal PAL_cserve
+ ret ($26)
+ .end cserve_dis
+
+ #
+ # It is handy, on occasion, to make halt actually just loop.
+ # Putting it here means we don't have to recompile the whole
+ # kernel.
+ #
+
+ .align 3
+ .globl halt
+ .ent halt
+halt:
+ .prologue 0
+ call_pal PAL_halt
+ .end halt
diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c
new file mode 100644
index 000000000000..835d09a7b332
--- /dev/null
+++ b/arch/alpha/kernel/init_task.c
@@ -0,0 +1,23 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+#include <asm/uaccess.h>
+
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_mm);
+EXPORT_SYMBOL(init_task);
+
+union thread_union init_thread_union
+ __attribute__((section(".data.init_thread")))
+ = { INIT_THREAD_INFO(init_task) };
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
new file mode 100644
index 000000000000..19c5875ab398
--- /dev/null
+++ b/arch/alpha/kernel/io.c
@@ -0,0 +1,630 @@
+/*
+ * Alpha IO and memory functions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Out-of-line versions of the i/o routines that redirect into the
+ platform-specific version. Note that "platform-specific" may mean
+ "generic", which bumps through the machine vector. */
+
+unsigned int
+ioread8(void __iomem *addr)
+{
+ unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ mb();
+ return ret;
+}
+
+unsigned int ioread16(void __iomem *addr)
+{
+ unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ mb();
+ return ret;
+}
+
+unsigned int ioread32(void __iomem *addr)
+{
+ unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ mb();
+ return ret;
+}
+
+void iowrite8(u8 b, void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
+ mb();
+}
+
+void iowrite16(u16 b, void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
+ mb();
+}
+
+void iowrite32(u32 b, void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
+ mb();
+}
+
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite32);
+
+u8 inb(unsigned long port)
+{
+ return ioread8(ioport_map(port, 1));
+}
+
+u16 inw(unsigned long port)
+{
+ return ioread16(ioport_map(port, 2));
+}
+
+u32 inl(unsigned long port)
+{
+ return ioread32(ioport_map(port, 4));
+}
+
+void outb(u8 b, unsigned long port)
+{
+ iowrite8(b, ioport_map(port, 1));
+}
+
+void outw(u16 b, unsigned long port)
+{
+ iowrite16(b, ioport_map(port, 2));
+}
+
+void outl(u32 b, unsigned long port)
+{
+ iowrite32(b, ioport_map(port, 4));
+}
+
+EXPORT_SYMBOL(inb);
+EXPORT_SYMBOL(inw);
+EXPORT_SYMBOL(inl);
+EXPORT_SYMBOL(outb);
+EXPORT_SYMBOL(outw);
+EXPORT_SYMBOL(outl);
+
+u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readb)(addr);
+}
+
+u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readw)(addr);
+}
+
+u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readl)(addr);
+}
+
+u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readq)(addr);
+}
+
+void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
+}
+
+void __raw_writew(u16 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writew)(b, addr);
+}
+
+void __raw_writel(u32 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writel)(b, addr);
+}
+
+void __raw_writeq(u64 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
+}
+
+EXPORT_SYMBOL(__raw_readb);
+EXPORT_SYMBOL(__raw_readw);
+EXPORT_SYMBOL(__raw_readl);
+EXPORT_SYMBOL(__raw_readq);
+EXPORT_SYMBOL(__raw_writeb);
+EXPORT_SYMBOL(__raw_writew);
+EXPORT_SYMBOL(__raw_writel);
+EXPORT_SYMBOL(__raw_writeq);
+
+u8 readb(const volatile void __iomem *addr)
+{
+ u8 ret = __raw_readb(addr);
+ mb();
+ return ret;
+}
+
+u16 readw(const volatile void __iomem *addr)
+{
+ u16 ret = __raw_readw(addr);
+ mb();
+ return ret;
+}
+
+u32 readl(const volatile void __iomem *addr)
+{
+ u32 ret = __raw_readl(addr);
+ mb();
+ return ret;
+}
+
+u64 readq(const volatile void __iomem *addr)
+{
+ u64 ret = __raw_readq(addr);
+ mb();
+ return ret;
+}
+
+void writeb(u8 b, volatile void __iomem *addr)
+{
+ __raw_writeb(b, addr);
+ mb();
+}
+
+void writew(u16 b, volatile void __iomem *addr)
+{
+ __raw_writew(b, addr);
+ mb();
+}
+
+void writel(u32 b, volatile void __iomem *addr)
+{
+ __raw_writel(b, addr);
+ mb();
+}
+
+void writeq(u64 b, volatile void __iomem *addr)
+{
+ __raw_writeq(b, addr);
+ mb();
+}
+
+EXPORT_SYMBOL(readb);
+EXPORT_SYMBOL(readw);
+EXPORT_SYMBOL(readl);
+EXPORT_SYMBOL(readq);
+EXPORT_SYMBOL(writeb);
+EXPORT_SYMBOL(writew);
+EXPORT_SYMBOL(writel);
+EXPORT_SYMBOL(writeq);
+
+
+/*
+ * Read COUNT 8-bit bytes from port PORT into memory starting at DST.
+ */
+void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
+{
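+	/* Copy bytes until dst is word-aligned, then assemble whole words
+	   from four port reads each, then mop up any remainder. */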
+ while ((unsigned long)dst & 0x3) {
+ if (!count)
+ return;
+ count--;
+ *(unsigned char *)dst = ioread8(port);
+ dst += 1;
+ }
+
+ while (count >= 4) {
+ unsigned int w;
+ count -= 4;
+ w = ioread8(port);
+ w |= ioread8(port) << 8;
+ w |= ioread8(port) << 16;
+ w |= ioread8(port) << 24;
+ *(unsigned int *)dst = w;
+ dst += 4;
+ }
+
+ while (count) {
+ --count;
+ *(unsigned char *)dst = ioread8(port);
+ dst += 1;
+ }
+}
+
+void insb(unsigned long port, void *dst, unsigned long count)
+{
+ ioread8_rep(ioport_map(port, 1), dst, count);
+}
+
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(insb);
+
+/*
+ * Read COUNT 16-bit words from port PORT into memory starting at
+ * DST. DST must be at least short aligned. This is used by the
+ * IDE driver to read disk sectors. Performance is important, but
+ * the interface seems to be slow: just using the inlined version
+ * of inw() breaks things.
+ */
+void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
+{
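+	/* dst may be halfword- but not word-aligned; copy one halfword to
+	   reach word alignment. An odd dst is a bug (see BUG_ON below). */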
+ if (unlikely((unsigned long)dst & 0x3)) {
+ if (!count)
+ return;
+ BUG_ON((unsigned long)dst & 0x1);
+ count--;
+ *(unsigned short *)dst = ioread16(port);
+ dst += 2;
+ }
+
+ while (count >= 2) {
+ unsigned int w;
+ count -= 2;
+ w = ioread16(port);
+ w |= ioread16(port) << 16;
+ *(unsigned int *)dst = w;
+ dst += 4;
+ }
+
+ if (count) {
+		*(unsigned short *)dst = ioread16(port);
+ }
+}
+
+void insw(unsigned long port, void *dst, unsigned long count)
+{
+ ioread16_rep(ioport_map(port, 2), dst, count);
+}
+
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(insw);
+
+
+/*
+ * Read COUNT 32-bit words from port PORT into memory starting at
+ * DST. Now works with any alignment in DST. Performance is important,
+ * but the interface seems to be slow: just using the inlined version
+ * of inl() breaks things.
+ */
+void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
+{
+ if (unlikely((unsigned long)dst & 0x3)) {
+ while (count--) {
+ struct S { int x __attribute__((packed)); };
+ ((struct S *)dst)->x = ioread32(port);
+ dst += 4;
+ }
+ } else {
+ /* Buffer 32-bit aligned. */
+ while (count--) {
+ *(unsigned int *)dst = ioread32(port);
+ dst += 4;
+ }
+ }
+}
+
+void insl(unsigned long port, void *dst, unsigned long count)
+{
+ ioread32_rep(ioport_map(port, 4), dst, count);
+}
+
+EXPORT_SYMBOL(ioread32_rep);
+EXPORT_SYMBOL(insl);
+
+
+/*
+ * Like insb but in the opposite direction.
+ * Don't worry as much about doing aligned memory transfers:
+ * doing byte reads the "slow" way isn't nearly as slow as
+ * doing byte writes the slow way (no r-m-w cycle).
+ */
+void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
+{
+ const unsigned char *src = xsrc;
+ while (count--)
+ iowrite8(*src++, port);
+}
+
+void outsb(unsigned long port, const void *src, unsigned long count)
+{
+ iowrite8_rep(ioport_map(port, 1), src, count);
+}
+
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(outsb);
+
+
+/*
+ * Like insw but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Performance is important, but the
+ * interface seems to be slow: just using the inlined version of
+ * outw() breaks things.
+ */
+void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
+{
+ if (unlikely((unsigned long)src & 0x3)) {
+ if (!count)
+ return;
+ BUG_ON((unsigned long)src & 0x1);
+ iowrite16(*(unsigned short *)src, port);
+ src += 2;
+ --count;
+ }
+
+ while (count >= 2) {
+ unsigned int w;
+ count -= 2;
+ w = *(unsigned int *)src;
+ src += 4;
+ iowrite16(w >> 0, port);
+ iowrite16(w >> 16, port);
+ }
+
+ if (count) {
+ iowrite16(*(unsigned short *)src, port);
+ }
+}
+
+void outsw(unsigned long port, const void *src, unsigned long count)
+{
+ iowrite16_rep(ioport_map(port, 2), src, count);
+}
+
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(outsw);
+
+
+/*
+ * Like insl but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Works with any alignment in SRC.
+ * Performance is important, but the interface seems to be slow:
+ * just using the inlined version of outl() breaks things.
+ */
+void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
+{
+ if (unlikely((unsigned long)src & 0x3)) {
+ while (count--) {
+ struct S { int x __attribute__((packed)); };
+ iowrite32(((struct S *)src)->x, port);
+ src += 4;
+ }
+ } else {
+ /* Buffer 32-bit aligned. */
+ while (count--) {
+ iowrite32(*(unsigned int *)src, port);
+ src += 4;
+ }
+ }
+}
+
+void outsl(unsigned long port, const void *src, unsigned long count)
+{
+ iowrite32_rep(ioport_map(port, 4), src, count);
+}
+
+EXPORT_SYMBOL(iowrite32_rep);
+EXPORT_SYMBOL(outsl);
+
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+ /* Optimize co-aligned transfers. Everything else gets handled
+ a byte at a time. */
+
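+	/* The "count -= N; do { ... } while (count >= 0); count += N" shape
+	   below biases the counter so each loop pass needs only one test. */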
+ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
+ count -= 8;
+ do {
+ *(u64 *)to = __raw_readq(from);
+ count -= 8;
+ to += 8;
+ from += 8;
+ } while (count >= 0);
+ count += 8;
+ }
+
+ if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
+ count -= 4;
+ do {
+ *(u32 *)to = __raw_readl(from);
+ count -= 4;
+ to += 4;
+ from += 4;
+ } while (count >= 0);
+ count += 4;
+ }
+
+ if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
+ count -= 2;
+ do {
+ *(u16 *)to = __raw_readw(from);
+ count -= 2;
+ to += 2;
+ from += 2;
+ } while (count >= 0);
+ count += 2;
+ }
+
+ while (count > 0) {
+ *(u8 *) to = __raw_readb(from);
+ count--;
+ to++;
+ from++;
+ }
+ mb();
+}
+
+EXPORT_SYMBOL(memcpy_fromio);
+
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void memcpy_toio(volatile void __iomem *to, const void *from, long count)
+{
+ /* Optimize co-aligned transfers. Everything else gets handled
+ a byte at a time. */
+ /* FIXME -- align FROM. */
+
+ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
+ count -= 8;
+ do {
+ __raw_writeq(*(const u64 *)from, to);
+ count -= 8;
+ to += 8;
+ from += 8;
+ } while (count >= 0);
+ count += 8;
+ }
+
+ if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
+ count -= 4;
+ do {
+ __raw_writel(*(const u32 *)from, to);
+ count -= 4;
+ to += 4;
+ from += 4;
+ } while (count >= 0);
+ count += 4;
+ }
+
+ if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
+ count -= 2;
+ do {
+ __raw_writew(*(const u16 *)from, to);
+ count -= 2;
+ to += 2;
+ from += 2;
+ } while (count >= 0);
+ count += 2;
+ }
+
+ while (count > 0) {
+ __raw_writeb(*(const u8 *) from, to);
+ count--;
+ to++;
+ from++;
+ }
+ mb();
+}
+
+EXPORT_SYMBOL(memcpy_toio);
+
+
+/*
+ * "memset" on IO memory space.
+ */
+void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
+{
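+	/* 'c' is assumed to hold the fill byte replicated across all eight
+	   bytes, so the partial-width stores below write the same pattern. */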
+ /* Handle any initial odd byte */
+ if (count > 0 && ((u64)to & 1)) {
+ __raw_writeb(c, to);
+ to++;
+ count--;
+ }
+
+ /* Handle any initial odd halfword */
+ if (count >= 2 && ((u64)to & 2)) {
+ __raw_writew(c, to);
+ to += 2;
+ count -= 2;
+ }
+
+ /* Handle any initial odd word */
+ if (count >= 4 && ((u64)to & 4)) {
+ __raw_writel(c, to);
+ to += 4;
+ count -= 4;
+ }
+
+ /* Handle all full-sized quadwords: we're aligned
+ (or have a small count) */
+ count -= 8;
+ if (count >= 0) {
+ do {
+ __raw_writeq(c, to);
+ to += 8;
+ count -= 8;
+ } while (count >= 0);
+ }
+ count += 8;
+
+ /* The tail is word-aligned if we still have count >= 4 */
+ if (count >= 4) {
+ __raw_writel(c, to);
+ to += 4;
+ count -= 4;
+ }
+
+ /* The tail is half-word aligned if we have count >= 2 */
+ if (count >= 2) {
+ __raw_writew(c, to);
+ to += 2;
+ count -= 2;
+ }
+
+ /* And finally, one last byte.. */
+ if (count) {
+ __raw_writeb(c, to);
+ }
+ mb();
+}
+
+EXPORT_SYMBOL(_memset_c_io);
+
+/* A version of memcpy used by the vga console routines to move data around
+ arbitrarily between screen and main memory. */
+
+void
+scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
+{
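+	/* As with memcpy, 'count' is in bytes; the word-copy loop below
+	   halves it. */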
+ const u16 __iomem *ios = (const u16 __iomem *) s;
+ u16 __iomem *iod = (u16 __iomem *) d;
+ int s_isio = __is_ioaddr(s);
+ int d_isio = __is_ioaddr(d);
+
+ if (s_isio) {
+ if (d_isio) {
+ /* FIXME: Should handle unaligned ops and
+ operation widening. */
+
+ count /= 2;
+ while (count--) {
+ u16 tmp = __raw_readw(ios++);
+ __raw_writew(tmp, iod++);
+ }
+ }
+ else
+ memcpy_fromio(d, ios, count);
+ } else {
+ if (d_isio)
+ memcpy_toio(iod, s, count);
+ else
+ memcpy(d, s, count);
+ }
+}
+
+EXPORT_SYMBOL(scr_memcpyw);
+
+void __iomem *ioport_map(unsigned long port, unsigned int size)
+{
+ return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
new file mode 100644
index 000000000000..b6114f5c0d2b
--- /dev/null
+++ b/arch/alpha/kernel/irq.c
@@ -0,0 +1,774 @@
+/*
+ * linux/arch/alpha/kernel/irq.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/profile.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/*
+ * Controller mappings for all interrupt sources:
+ */
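+/* The [0 ... NR_IRQS-1] range initializer below is a GCC extension. */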
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
+
+static void register_irq_proc(unsigned int irq);
+
+volatile unsigned long irq_err_count;
+
+/*
+ * Special irq handlers.
+ */
+
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_NONE;
+}
+
+/*
+ * Generic no controller code
+ */
+
+static void no_irq_enable_disable(unsigned int irq) { }
+static unsigned int no_irq_startup(unsigned int irq) { return 0; }
+
+static void
+no_irq_ack(unsigned int irq)
+{
+ irq_err_count++;
+ printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
+}
+
+struct hw_interrupt_type no_irq_type = {
+ .typename = "none",
+ .startup = no_irq_startup,
+ .shutdown = no_irq_enable_disable,
+ .enable = no_irq_enable_disable,
+ .disable = no_irq_enable_disable,
+ .ack = no_irq_ack,
+ .end = no_irq_enable_disable,
+};
+
+int
+handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+ struct irqaction *action)
+{
+ int status = 1; /* Force the "do bottom halves" bit */
+ int ret;
+
+ do {
+ if (!(action->flags & SA_INTERRUPT))
+ local_irq_enable();
+ else
+ local_irq_disable();
+
+ ret = action->handler(irq, action->dev_id, regs);
+ if (ret == IRQ_HANDLED)
+ status |= action->flags;
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ local_irq_disable();
+
+ return status;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+inline void
+disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/*
+ * Synchronous version of the above, making sure the IRQ is
+ * no longer running on any other IRQ..
+ */
+void
+disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ synchronize_irq(irq);
+}
+
+void
+enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
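+	/* Case order is deliberate: depth 1 re-enables the line and falls
+	   through to the decrement in "default"; depth 0 is an unbalanced
+	   enable and is reported below. */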
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ desc->handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk(KERN_ERR "enable_irq() unbalanced from %p\n",
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+int
+setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+ irq_desc_t *desc = irq_desc + irq;
+
+ if (desc->handler == &no_irq_type)
+ return -ENOSYS;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+		 * Yes, this might clear the entropy pool if the wrong
+		 * driver is loaded without actually installing a new
+		 * handler, but is this really a problem? Only the
+		 * sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->depth = 0;
+ desc->status &=
+ ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
+ desc->handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ return 0;
+}
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir[NR_IRQS];
+
+#ifdef CONFIG_SMP
+static struct proc_dir_entry * smp_affinity_entry[NR_IRQS];
+static char irq_user_affinity[NR_IRQS];
+static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
+static void
+select_smp_affinity(int irq)
+{
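+	/* Rotate newly requested IRQs across the possible CPUs, unless the
+	   user has already pinned this one via /proc (irq_user_affinity). */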
+ static int last_cpu;
+ int cpu = last_cpu + 1;
+
+ if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
+ return;
+
+ while (!cpu_possible(cpu))
+ cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
+ last_cpu = cpu;
+
+ irq_affinity[irq] = cpumask_of_cpu(cpu);
+ irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
+}
+
+static int
+irq_affinity_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+ if (count - len < 2)
+ return -EINVAL;
+ len += sprintf(page + len, "\n");
+ return len;
+}
+
+static int
+irq_affinity_write_proc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ int irq = (long) data, full_count = count, err;
+ cpumask_t new_value;
+
+ if (!irq_desc[irq].handler->set_affinity)
+ return -EIO;
+
+	err = cpumask_parse(buffer, count, new_value);
+	if (err)
+		return err;
+
+ /* The special value 0 means release control of the
+ affinity to kernel. */
+ cpus_and(new_value, new_value, cpu_online_map);
+ if (cpus_empty(new_value)) {
+ irq_user_affinity[irq] = 0;
+ select_smp_affinity(irq);
+ }
+ /* Do not allow disabling IRQs completely - it's a too easy
+ way to make the system unusable accidentally :-) At least
+ one online CPU still has to be targeted. */
+ else {
+ irq_affinity[irq] = new_value;
+ irq_user_affinity[irq] = 1;
+ irq_desc[irq].handler->set_affinity(irq, new_value);
+ }
+
+ return full_count;
+}
+
+#endif /* CONFIG_SMP */
+
+#define MAX_NAMELEN 10
+
+static void
+register_irq_proc (unsigned int irq)
+{
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
+ irq_dir[irq])
+ return;
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#ifdef CONFIG_SMP
+ if (irq_desc[irq].handler->set_affinity) {
+ struct proc_dir_entry *entry;
+ /* create /proc/irq/1234/smp_affinity */
+ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = (void *)(long)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+ }
+
+ smp_affinity_entry[irq] = entry;
+ }
+#endif
+}
+
+void
+init_irq_proc (void)
+{
+ int i;
+
+ /* create /proc/irq */
+ root_irq_dir = proc_mkdir("irq", NULL);
+
+#ifdef CONFIG_SMP
+ /* create /proc/irq/prof_cpu_mask */
+ create_prof_cpu_mask(root_irq_dir);
+#endif
+
+ /*
+ * Create entries for all existing IRQs.
+ */
+ for (i = 0; i < ACTUAL_NR_IRQS; i++) {
+ if (irq_desc[i].handler == &no_irq_type)
+ continue;
+ register_irq_proc(i);
+ }
+}
+
+int
+request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char * devname, void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+ if (irq >= ACTUAL_NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+#if 1
+ /*
+ * Sanity-check: shared interrupts should REALLY pass in
+ * a real dev-ID, otherwise we'll have trouble later trying
+ * to figure out which interrupt is which (messes up the
+ * interrupt freeing logic etc).
+ */
+ if ((irqflags & SA_SHIRQ) && !dev_id) {
+ printk(KERN_ERR
+ "Bad boy: %s (at %p) called us without a dev_id!\n",
+ devname, __builtin_return_address(0));
+ }
+#endif
+
+ action = (struct irqaction *)
+ kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ cpus_clear(action->mask);
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+#ifdef CONFIG_SMP
+ select_smp_affinity(irq);
+#endif
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
+
+void
+free_irq(unsigned int irq, void *dev_id)
+{
+ irq_desc_t *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= ACTUAL_NR_IRQS) {
+ printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
+ return;
+ }
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found - now remove it from the list of entries. */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->shutdown(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+#ifdef CONFIG_SMP
+ /* Wait to make sure it's not being used on
+ another CPU. */
+ while (desc->status & IRQ_INPROGRESS)
+ barrier();
+#endif
+ kfree(action);
+ return;
+ }
+ printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return;
+ }
+}
+
+EXPORT_SYMBOL(free_irq);
+
+int
+show_interrupts(struct seq_file *p, void *v)
+{
+#ifdef CONFIG_SMP
+ int j;
+#endif
+ int i = *(loff_t *) v;
+ struct irqaction * action;
+ unsigned long flags;
+
+#ifdef CONFIG_SMP
+ if (i == 0) {
+ seq_puts(p, " ");
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_online(i))
+ seq_printf(p, "CPU%d ", i);
+ seq_putc(p, '\n');
+ }
+#endif
+
+ if (i < ACTUAL_NR_IRQS) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto unlock;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %c%s",
+ (action->flags & SA_INTERRUPT)?'+':' ',
+ action->name);
+
+ for (action=action->next; action; action = action->next) {
+ seq_printf(p, ", %c%s",
+ (action->flags & SA_INTERRUPT)?'+':' ',
+ action->name);
+ }
+
+ seq_putc(p, '\n');
+unlock:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ } else if (i == ACTUAL_NR_IRQS) {
+#ifdef CONFIG_SMP
+ seq_puts(p, "IPI: ");
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_online(i))
+ seq_printf(p, "%10lu ", cpu_data[i].ipi_count);
+ seq_putc(p, '\n');
+#endif
+ seq_printf(p, "ERR: %10lu\n", irq_err_count);
+ }
+ return 0;
+}
+
+
+/*
+ * handle_irq handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+
+#define MAX_ILLEGAL_IRQS 16
+
+void
+handle_irq(int irq, struct pt_regs * regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+	 * If we take no action below, the irq is already being
+	 * handled by some other CPU (or is disabled).
+ */
+ int cpu = smp_processor_id();
+ irq_desc_t *desc = irq_desc + irq;
+ struct irqaction * action;
+ unsigned int status;
+ static unsigned int illegal_count=0;
+
+	if ((unsigned) irq >= ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS) {
+ irq_err_count++;
+ illegal_count++;
+ printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
+ irq);
+ return;
+ }
+
+ irq_enter();
+ kstat_cpu(cpu).irqs[irq]++;
+ spin_lock_irq(&desc->lock); /* mask also the higher prio events */
+ desc->handler->ack(irq);
+ /*
+ * REPLAY is when Linux resends an IRQ that was dropped earlier.
+ * WAITING is used by probe to mark irqs that are being tested.
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ * Since we set PENDING, if another processor is handling
+ * a different instance of this same irq, the other processor
+ * will take care of it.
+ */
+ if (!action)
+ goto out;
+
+ /*
+ * Edge triggered interrupts need to remember pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in handle_irq
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ spin_unlock(&desc->lock);
+ handle_IRQ_event(irq, regs, action);
+ spin_lock(&desc->lock);
+
+ if (!(desc->status & IRQ_PENDING)
+ || (desc->status & IRQ_LEVEL))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ desc->handler->end(irq);
+ spin_unlock(&desc->lock);
+
+ irq_exit();
+}
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+unsigned long
+probe_irq_on(void)
+{
+ int i;
+ irq_desc_t *desc;
+ unsigned long delay;
+ unsigned long val;
+
+ /* Something may have generated an irq long ago and we want to
+ flush such a longstanding irq before considering it as spurious. */
+ for (i = NR_IRQS-1; i >= 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!irq_desc[i].action)
+ irq_desc[i].handler->startup(i);
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ /* about 20ms delay */ barrier();
+
+	/* Enable any unassigned irqs. We must start them up again here
+	   because if a longstanding irq happened in the previous stage,
+	   it may have masked itself. */
+ for (i = NR_IRQS-1; i >= 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!desc->action) {
+ desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if (desc->handler->startup(i))
+ desc->status |= IRQ_PENDING;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ barrier();
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ val = 0;
+ for (i=0; i<NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ } else
+ if (i < 32)
+ val |= 1 << i;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ return val;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+/*
+ * Return a mask of triggered interrupts (this
+ * can handle only legacy ISA interrupts).
+ */
+unsigned int
+probe_irq_mask(unsigned long val)
+{
+ int i;
+ unsigned int mask;
+
+ mask = 0;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* We only react to ISA interrupts */
+ if (!(status & IRQ_WAITING)) {
+ if (i < 16)
+ mask |= 1 << i;
+ }
+
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ return mask & val;
+}
+
+/*
+ * Get the result of the IRQ probe.. A negative result means that
+ * we have several candidates (but we return the lowest-numbered
+ * one).
+ */
+
+int
+probe_irq_off(unsigned long val)
+{
+ int i, irq_found, nr_irqs;
+
+ nr_irqs = 0;
+ irq_found = 0;
+ for (i=0; i<NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+#ifdef CONFIG_SMP
+void synchronize_irq(unsigned int irq)
+{
+ /* is there anything to synchronize with? */
+ if (!irq_desc[irq].action)
+ return;
+
+ while (irq_desc[irq].status & IRQ_INPROGRESS)
+ barrier();
+}
+#endif
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
new file mode 100644
index 000000000000..e6ded33c6e22
--- /dev/null
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -0,0 +1,252 @@
+/*
+ * Alpha specific irq code.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/machvec.h>
+#include <asm/dma.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+/* Hack minimum IPL during interrupt processing for broken hardware. */
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+int __min_ipl;
+#endif
+
+/*
+ * Performance counter hook. A module can override this to
+ * do something useful.
+ */
+static void
+dummy_perf(unsigned long vector, struct pt_regs *regs)
+{
+ irq_err_count++;
+ printk(KERN_CRIT "Performance counter interrupt!\n");
+}
+
+void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
+
+/*
+ * The main interrupt entry point.
+ */
+
+asmlinkage void
+do_entInt(unsigned long type, unsigned long vector,
+ unsigned long la_ptr, struct pt_regs *regs)
+{
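+	/* PALcode dispatches here with a summary 'type': 0 = interprocessor,
+	   1 = clock, 2 = machine check, 3 = device, 4 = performance counter. */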
+ switch (type) {
+ case 0:
+#ifdef CONFIG_SMP
+ handle_ipi(regs);
+ return;
+#else
+ irq_err_count++;
+ printk(KERN_CRIT "Interprocessor interrupt? "
+ "You must be kidding!\n");
+#endif
+ break;
+ case 1:
+#ifdef CONFIG_SMP
+ {
+ long cpu;
+ smp_percpu_timer_interrupt(regs);
+ cpu = smp_processor_id();
+ if (cpu != boot_cpuid) {
+ kstat_cpu(cpu).irqs[RTC_IRQ]++;
+ } else {
+ handle_irq(RTC_IRQ, regs);
+ }
+ }
+#else
+ handle_irq(RTC_IRQ, regs);
+#endif
+ return;
+ case 2:
+ alpha_mv.machine_check(vector, la_ptr, regs);
+ return;
+ case 3:
+ alpha_mv.device_interrupt(vector, regs);
+ return;
+ case 4:
+ perf_irq(la_ptr, regs);
+ return;
+ default:
+ printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
+ type, vector);
+ }
+ printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
+}
+
+void __init
+common_init_isa_dma(void)
+{
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(0, DMA1_CLR_MASK_REG);
+ outb(0, DMA2_CLR_MASK_REG);
+}
+
+void __init
+init_IRQ(void)
+{
+ /* Just in case the platform init_irq() causes interrupts/mchecks
+ (as is the case with RAWHIDE, at least). */
+ wrent(entInt, 0);
+
+ alpha_mv.init_irq();
+}
+
+/*
+ * machine error checks
+ */
+#define MCHK_K_TPERR 0x0080
+#define MCHK_K_TCPERR 0x0082
+#define MCHK_K_HERR 0x0084
+#define MCHK_K_ECC_C 0x0086
+#define MCHK_K_ECC_NC 0x0088
+#define MCHK_K_OS_BUGCHECK 0x008A
+#define MCHK_K_PAL_BUGCHECK 0x0090
+
+#ifndef CONFIG_SMP
+struct mcheck_info __mcheck_info;
+#endif
+
+void
+process_mcheck_info(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs, const char *machine,
+ int expected)
+{
+ struct el_common *mchk_header;
+ const char *reason;
+
+ /*
+ * See if the machine check is due to a badaddr() and if so,
+ * ignore it.
+ */
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk(KERN_CRIT "%s machine check %s\n", machine,
+ expected ? "expected." : "NOT expected!!!");
+ }
+#endif
+
+ if (expected) {
+ int cpu = smp_processor_id();
+ mcheck_expected(cpu) = 0;
+ mcheck_taken(cpu) = 1;
+ return;
+ }
+
+ mchk_header = (struct el_common *)la_ptr;
+
+ printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%x\n",
+ machine, vector, regs->pc, mchk_header->code);
+
+ switch (mchk_header->code) {
+ /* Machine check reasons. Defined according to PALcode sources. */
+ case 0x80: reason = "tag parity error"; break;
+ case 0x82: reason = "tag control parity error"; break;
+ case 0x84: reason = "generic hard error"; break;
+ case 0x86: reason = "correctable ECC error"; break;
+ case 0x88: reason = "uncorrectable ECC error"; break;
+ case 0x8A: reason = "OS-specific PAL bugcheck"; break;
+ case 0x90: reason = "callsys in kernel mode"; break;
+ case 0x96: reason = "i-cache read retryable error"; break;
+ case 0x98: reason = "processor detected hard error"; break;
+
+ /* System specific (these are for Alcor, at least): */
+ case 0x202: reason = "system detected hard error"; break;
+ case 0x203: reason = "system detected uncorrectable ECC error"; break;
+ case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
+ case 0x205: reason = "parity error detected by core logic"; break;
+ case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break;
+ case 0x207: reason = "non-existent memory error"; break;
+ case 0x208: reason = "MCHK_K_DCSR"; break;
+ case 0x209: reason = "PCI SERR detected"; break;
+ case 0x20b: reason = "PCI data parity error detected"; break;
+ case 0x20d: reason = "PCI address parity error detected"; break;
+ case 0x20f: reason = "PCI master abort error"; break;
+ case 0x211: reason = "PCI target abort error"; break;
+ case 0x213: reason = "scatter/gather PTE invalid error"; break;
+ case 0x215: reason = "flash ROM write error"; break;
+ case 0x217: reason = "IOA timeout detected"; break;
+ case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
+ case 0x21b: reason = "EISA fail-safe timer timeout"; break;
+ case 0x21d: reason = "EISA bus time-out"; break;
+ case 0x21f: reason = "EISA software generated NMI"; break;
+ case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break;
+ default: reason = "unknown"; break;
+ }
+
+ printk(KERN_CRIT "machine check type: %s%s\n",
+ reason, mchk_header->retry ? " (retryable)" : "");
+
+ dik_show_regs(regs, NULL);
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ /* Dump the logout area to give all info. */
+ unsigned long *ptr = (unsigned long *)la_ptr;
+ long i;
+ for (i = 0; i < mchk_header->size / sizeof(long); i += 2) {
+ printk(KERN_CRIT " +%8lx %016lx %016lx\n",
+ i*sizeof(long), ptr[i], ptr[i+1]);
+ }
+ }
+#endif /* CONFIG_VERBOSE_MCHECK */
+}
+
+/*
+ * The special RTC interrupt type. The interrupt itself was
+ * processed by PALcode, and comes in via entInt vector 1.
+ */
+
+static void rtc_enable_disable(unsigned int irq) { }
+static unsigned int rtc_startup(unsigned int irq) { return 0; }
+
+struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+ .flags = SA_INTERRUPT,
+ .name = "timer",
+};
+
+static struct hw_interrupt_type rtc_irq_type = {
+ .typename = "RTC",
+ .startup = rtc_startup,
+ .shutdown = rtc_enable_disable,
+ .enable = rtc_enable_disable,
+ .disable = rtc_enable_disable,
+ .ack = rtc_enable_disable,
+ .end = rtc_enable_disable,
+};
+
+void __init
+init_rtc_irq(void)
+{
+ irq_desc[RTC_IRQ].status = IRQ_DISABLED;
+ irq_desc[RTC_IRQ].handler = &rtc_irq_type;
+ setup_irq(RTC_IRQ, &timer_irqaction);
+}
+
+/* Dummy irqactions. */
+struct irqaction isa_cascade_irqaction = {
+ .handler = no_action,
+ .name = "isa-cascade"
+};
+
+struct irqaction timer_cascade_irqaction = {
+ .handler = no_action,
+ .name = "timer-cascade"
+};
+
+struct irqaction halt_switch_irqaction = {
+ .handler = no_action,
+ .name = "halt-switch"
+};
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
new file mode 100644
index 000000000000..b188683b83fd
--- /dev/null
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -0,0 +1,183 @@
+/*
+ * linux/arch/alpha/kernel/irq_i8259.c
+ *
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ *
+ * Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/* Note mask bit is true for DISABLED irqs. */
+static unsigned int cached_irq_mask = 0xffff;
+static DEFINE_SPINLOCK(i8259_irq_lock);
+
+static inline void
+i8259_update_irq_hw(unsigned int irq, unsigned long mask)
+{
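+	/* IRQs 0-7 are masked at the master PIC (port 0x21), IRQs 8-15 at
+	   the slave (port 0xA1); cached_irq_mask holds both in 16 bits. */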
+ int port = 0x21;
+ if (irq & 8) mask >>= 8;
+ if (irq & 8) port = 0xA1;
+ outb(mask, port);
+}
+
+inline void
+i8259a_enable_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+ spin_unlock(&i8259_irq_lock);
+}
+
+static inline void
+__i8259a_disable_irq(unsigned int irq)
+{
+ i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+}
+
+void
+i8259a_disable_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ __i8259a_disable_irq(irq);
+ spin_unlock(&i8259_irq_lock);
+}
+
+void
+i8259a_mask_and_ack_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ __i8259a_disable_irq(irq);
+
+ /* Ack the interrupt making it the lowest priority. */
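+	/* 0xE0 | level is the 8259 "rotate on specific EOI" command. */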
+ if (irq >= 8) {
+ outb(0xE0 | (irq - 8), 0xa0); /* ack the slave */
+ irq = 2;
+ }
+ outb(0xE0 | irq, 0x20); /* ack the master */
+ spin_unlock(&i8259_irq_lock);
+}
+
+unsigned int
+i8259a_startup_irq(unsigned int irq)
+{
+ i8259a_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+void
+i8259a_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ i8259a_enable_irq(irq);
+}
+
+struct hw_interrupt_type i8259a_irq_type = {
+ .typename = "XT-PIC",
+ .startup = i8259a_startup_irq,
+ .shutdown = i8259a_disable_irq,
+ .enable = i8259a_enable_irq,
+ .disable = i8259a_disable_irq,
+ .ack = i8259a_mask_and_ack_irq,
+ .end = i8259a_end_irq,
+};
+
+void __init
+init_i8259a_irqs(void)
+{
+ static struct irqaction cascade = {
+ .handler = no_action,
+ .name = "cascade",
+ };
+
+ long i;
+
+ outb(0xff, 0x21); /* mask all of 8259A-1 */
+ outb(0xff, 0xA1); /* mask all of 8259A-2 */
+
+ for (i = 0; i < 16; i++) {
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].handler = &i8259a_irq_type;
+ }
+
+ setup_irq(2, &cascade);
+}
+
+
+#if defined(CONFIG_ALPHA_GENERIC)
+# define IACK_SC alpha_mv.iack_sc
+#elif defined(CONFIG_ALPHA_APECS)
+# define IACK_SC APECS_IACK_SC
+#elif defined(CONFIG_ALPHA_LCA)
+# define IACK_SC LCA_IACK_SC
+#elif defined(CONFIG_ALPHA_CIA)
+# define IACK_SC CIA_IACK_SC
+#elif defined(CONFIG_ALPHA_PYXIS)
+# define IACK_SC PYXIS_IACK_SC
+#elif defined(CONFIG_ALPHA_TITAN)
+# define IACK_SC TITAN_IACK_SC
+#elif defined(CONFIG_ALPHA_TSUNAMI)
+# define IACK_SC TSUNAMI_IACK_SC
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# define IACK_SC IRONGATE_IACK_SC
+#endif
+/* Note that CONFIG_ALPHA_POLARIS is intentionally left out here, since
+ sys_rx164 wants to use isa_no_iack_sc_device_interrupt for some reason. */
+
+#if defined(IACK_SC)
+void
+isa_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ /*
+ * Generate a PCI interrupt acknowledge cycle. The PIC will
+ * respond with the interrupt vector of the highest priority
+ * interrupt that is pending. The PALcode sets up the
+	 * interrupt vectors such that irq level L generates vector L.
+ */
+ int j = *(vuip) IACK_SC;
+ j &= 0xff;
+ handle_irq(j, regs);
+}
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC)
+void
+isa_no_iack_sc_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pic;
+
+ /*
+ * It seems to me that the probability of two or more *device*
+ * interrupts occurring at almost exactly the same time is
+ * pretty low. So why pay the price of checking for
+ * additional interrupts here if the common case can be
+	 * handled so much more easily?
+ */
+ /*
+	 * The first read gives you *all* interrupting lines.
+	 * Therefore, read the mask register and AND out those lines
+	 * that are not enabled. Note that some documentation claims
+	 * ports 0x21 and 0xA1 are write-only. This is not true.
+ */
+ pic = inb(0x20) | (inb(0xA0) << 8); /* read isr */
+ pic &= 0xFFFB; /* mask out cascade & hibits */
+
+ while (pic) {
+ int j = ffz(~pic);
+ pic &= pic - 1;
+ handle_irq(j, regs);
+ }
+}
+#endif
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
new file mode 100644
index 000000000000..f201d8ffc0d9
--- /dev/null
+++ b/arch/alpha/kernel/irq_impl.h
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/alpha/kernel/irq_impl.h
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1998, 2000 Richard Henderson
+ *
+ * This file contains declarations and inline functions for interfacing
+ * with the IRQ handling routines in irq.c.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+
+
+#define RTC_IRQ 8
+
+extern void isa_device_interrupt(unsigned long, struct pt_regs *);
+extern void isa_no_iack_sc_device_interrupt(unsigned long, struct pt_regs *);
+extern void srm_device_interrupt(unsigned long, struct pt_regs *);
+extern void pyxis_device_interrupt(unsigned long, struct pt_regs *);
+
+extern struct irqaction timer_irqaction;
+extern struct irqaction isa_cascade_irqaction;
+extern struct irqaction timer_cascade_irqaction;
+extern struct irqaction halt_switch_irqaction;
+
+extern void init_srm_irqs(long, unsigned long);
+extern void init_pyxis_irqs(unsigned long);
+extern void init_rtc_irq(void);
+
+extern void common_init_isa_dma(void);
+
+extern void i8259a_enable_irq(unsigned int);
+extern void i8259a_disable_irq(unsigned int);
+extern void i8259a_mask_and_ack_irq(unsigned int);
+extern unsigned int i8259a_startup_irq(unsigned int);
+extern void i8259a_end_irq(unsigned int);
+extern struct hw_interrupt_type i8259a_irq_type;
+extern void init_i8259a_irqs(void);
+
+extern void handle_irq(int irq, struct pt_regs * regs);
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
new file mode 100644
index 000000000000..146a20b9e3d5
--- /dev/null
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/alpha/kernel/irq_pyxis.c
+ *
+ * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * IRQ Code common to all PYXIS core logic chips.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+#include <asm/io.h>
+#include <asm/core_cia.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/* Note mask bit is true for ENABLED irqs. */
+static unsigned long cached_irq_mask;
+
+static inline void
+pyxis_update_irq_hw(unsigned long mask)
+{
+ *(vulp)PYXIS_INT_MASK = mask;
+ mb();
+ *(vulp)PYXIS_INT_MASK;
+}
+
+static inline void
+pyxis_enable_irq(unsigned int irq)
+{
+ pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+}
+
+static void
+pyxis_disable_irq(unsigned int irq)
+{
+ pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+}
+
+static unsigned int
+pyxis_startup_irq(unsigned int irq)
+{
+ pyxis_enable_irq(irq);
+ return 0;
+}
+
+static void
+pyxis_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ pyxis_enable_irq(irq);
+}
+
+static void
+pyxis_mask_and_ack_irq(unsigned int irq)
+{
+ unsigned long bit = 1UL << (irq - 16);
+ unsigned long mask = cached_irq_mask &= ~bit;
+
+ /* Disable the interrupt. */
+ *(vulp)PYXIS_INT_MASK = mask;
+ wmb();
+ /* Ack PYXIS PCI interrupt. */
+ *(vulp)PYXIS_INT_REQ = bit;
+ mb();
+ /* Re-read to force both writes. */
+ *(vulp)PYXIS_INT_MASK;
+}
+
+static struct hw_interrupt_type pyxis_irq_type = {
+ .typename = "PYXIS",
+ .startup = pyxis_startup_irq,
+ .shutdown = pyxis_disable_irq,
+ .enable = pyxis_enable_irq,
+ .disable = pyxis_disable_irq,
+ .ack = pyxis_mask_and_ack_irq,
+ .end = pyxis_end_irq,
+};
+
+void
+pyxis_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary register of PYXIS */
+ pld = *(vulp)PYXIS_INT_REQ;
+ pld &= cached_irq_mask;
+
+ /*
+ * Now for every possible bit set, work through them and call
+ * the appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i == 7)
+ isa_device_interrupt(vector, regs);
+ else
+ handle_irq(16+i, regs);
+ }
+}
+
+void __init
+init_pyxis_irqs(unsigned long ignore_mask)
+{
+ long i;
+
+ *(vulp)PYXIS_INT_MASK = 0; /* disable all */
+ *(vulp)PYXIS_INT_REQ = -1; /* flush all */
+ mb();
+
+ /* Send -INTA pulses to clear any pending interrupts ...*/
+ *(vuip) CIA_IACK_SC;
+
+ for (i = 16; i < 48; ++i) {
+ if ((ignore_mask >> i) & 1)
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &pyxis_irq_type;
+ }
+
+ setup_irq(16+7, &isa_cascade_irqaction);
+}
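
pyxis_mask_and_ack_irq above shows a common MMIO idiom: two posted writes (disable, then ack) separated by a write barrier, followed by a dummy read of the mask register that forces both writes out to the chip before the handler continues. A hedged, compile-only sketch of that ordering in isolation (the register pointers are hypothetical stand-ins, and the comments mark where the real code places wmb()/mb()):

volatile unsigned long *int_mask;	/* hypothetical MMIO mask register */
volatile unsigned long *int_req;	/* hypothetical MMIO request register */

void mask_and_ack(unsigned long cached_mask, unsigned long bit)
{
	*int_mask = cached_mask & ~bit;	/* 1. disable the source */
	/* wmb() here: keep the two writes ordered */
	*int_req = bit;			/* 2. ack the latched request */
	/* mb() here */
	(void)*int_mask;		/* 3. read back: flush posted writes */
}

Without step 3 the writes could still be sitting in a write buffer when the CPU returns from the interrupt, letting the just-acked line fire again.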
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
new file mode 100644
index 000000000000..0a87e466918c
--- /dev/null
+++ b/arch/alpha/kernel/irq_srm.c
@@ -0,0 +1,79 @@
+/*
+ * Handle interrupts from the SRM, assuming no additional weirdness.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/*
+ * Is the palcode SMP safe? In other words: can we call cserve_ena/dis
+ * at the same time on multiple CPUs? To be safe I added a spinlock
+ * but it can be removed trivially if the palcode is robust against smp.
+ */
+DEFINE_SPINLOCK(srm_irq_lock);
+
+static inline void
+srm_enable_irq(unsigned int irq)
+{
+ spin_lock(&srm_irq_lock);
+ cserve_ena(irq - 16);
+ spin_unlock(&srm_irq_lock);
+}
+
+static void
+srm_disable_irq(unsigned int irq)
+{
+ spin_lock(&srm_irq_lock);
+ cserve_dis(irq - 16);
+ spin_unlock(&srm_irq_lock);
+}
+
+static unsigned int
+srm_startup_irq(unsigned int irq)
+{
+ srm_enable_irq(irq);
+ return 0;
+}
+
+static void
+srm_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ srm_enable_irq(irq);
+}
+
+/* Handle interrupts from the SRM, assuming no additional weirdness. */
+static struct hw_interrupt_type srm_irq_type = {
+ .typename = "SRM",
+ .startup = srm_startup_irq,
+ .shutdown = srm_disable_irq,
+ .enable = srm_enable_irq,
+ .disable = srm_disable_irq,
+ .ack = srm_disable_irq,
+ .end = srm_end_irq,
+};
+
+void __init
+init_srm_irqs(long max, unsigned long ignore_mask)
+{
+ long i;
+
+ for (i = 16; i < max; ++i) {
+ if (i < 64 && ((ignore_mask >> i) & 1))
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &srm_irq_type;
+ }
+}
+
+void
+srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
+}
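
srm_device_interrupt assumes SRM device vectors start at 0x800 and are spaced 0x10 apart, so (vector - 0x800) >> 4 recovers a dense IRQ number. A quick worked check of that decode (hypothetical vectors):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* irq = (vector - 0x800) >> 4, as in srm_device_interrupt */
	assert(((0x800 - 0x800) >> 4) == 0);	/* first vector -> irq 0 */
	assert(((0x900 - 0x800) >> 4) == 16);	/* vector 0x900 -> irq 16 */
	assert(((0x910 - 0x800) >> 4) == 17);	/* adjacent slot -> irq 17 */
	printf("vector decode OK\n");
	return 0;
}

Note that init_srm_irqs only registers descriptors from 16 upward; IRQs below 16 (timer, legacy devices) are set up elsewhere.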
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
new file mode 100644
index 000000000000..4959b7a3e1e6
--- /dev/null
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -0,0 +1,150 @@
+/*
+ * linux/arch/alpha/kernel/machvec_impl.h
+ *
+ * Copyright (C) 1997, 1998 Richard Henderson
+ *
+ * This file has goodies to help simplify instantiation of machine vectors.
+ */
+
+#include <linux/config.h>
+#include <asm/pgalloc.h>
+
+/* Whee. These systems don't have an HAE:
+ IRONGATE, MARVEL, POLARIS, TSUNAMI, TITAN, WILDFIRE
+ Fix things up for the GENERIC kernel by defining the HAE address
+ to be that of the cache. Now we can read and write it as we like. ;-) */
+#define IRONGATE_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define MARVEL_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define POLARIS_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define TSUNAMI_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define TITAN_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define WILDFIRE_HAE_ADDRESS (&alpha_mv.hae_cache)
+
+#ifdef CIA_ONE_HAE_WINDOW
+#define CIA_HAE_ADDRESS (&alpha_mv.hae_cache)
+#endif
+#ifdef MCPCIA_ONE_HAE_WINDOW
+#define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache)
+#endif
+
+/* Only a few systems don't define IACK_SC, handling all interrupts through
+ the SRM console. But splitting out that one case from IO() below
+ seems like such a pain. Define this to get things to compile. */
+#define JENSEN_IACK_SC 1
+#define T2_IACK_SC 1
+#define WILDFIRE_IACK_SC 1 /* FIXME */
+
+/*
+ * Some helpful macros for filling in the blanks.
+ */
+
+#define CAT1(x,y) x##y
+#define CAT(x,y) CAT1(x,y)
+
+#define DO_DEFAULT_RTC .rtc_port = 0x70
+
+#define DO_EV4_MMU \
+ .max_asn = EV4_MAX_ASN, \
+ .mv_switch_mm = ev4_switch_mm, \
+ .mv_activate_mm = ev4_activate_mm, \
+ .mv_flush_tlb_current = ev4_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev4_flush_tlb_current_page
+
+#define DO_EV5_MMU \
+ .max_asn = EV5_MAX_ASN, \
+ .mv_switch_mm = ev5_switch_mm, \
+ .mv_activate_mm = ev5_activate_mm, \
+ .mv_flush_tlb_current = ev5_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+
+#define DO_EV6_MMU \
+ .max_asn = EV6_MAX_ASN, \
+ .mv_switch_mm = ev5_switch_mm, \
+ .mv_activate_mm = ev5_activate_mm, \
+ .mv_flush_tlb_current = ev5_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+
+#define DO_EV7_MMU \
+ .max_asn = EV6_MAX_ASN, \
+ .mv_switch_mm = ev5_switch_mm, \
+ .mv_activate_mm = ev5_activate_mm, \
+ .mv_flush_tlb_current = ev5_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+
+#define IO_LITE(UP,low) \
+ .hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS), \
+ .iack_sc = CAT(UP,_IACK_SC), \
+ .mv_ioread8 = CAT(low,_ioread8), \
+ .mv_ioread16 = CAT(low,_ioread16), \
+ .mv_ioread32 = CAT(low,_ioread32), \
+ .mv_iowrite8 = CAT(low,_iowrite8), \
+ .mv_iowrite16 = CAT(low,_iowrite16), \
+ .mv_iowrite32 = CAT(low,_iowrite32), \
+ .mv_readb = CAT(low,_readb), \
+ .mv_readw = CAT(low,_readw), \
+ .mv_readl = CAT(low,_readl), \
+ .mv_readq = CAT(low,_readq), \
+ .mv_writeb = CAT(low,_writeb), \
+ .mv_writew = CAT(low,_writew), \
+ .mv_writel = CAT(low,_writel), \
+ .mv_writeq = CAT(low,_writeq), \
+ .mv_ioportmap = CAT(low,_ioportmap), \
+ .mv_ioremap = CAT(low,_ioremap), \
+ .mv_iounmap = CAT(low,_iounmap), \
+ .mv_is_ioaddr = CAT(low,_is_ioaddr), \
+ .mv_is_mmio = CAT(low,_is_mmio) \
+
+#define IO(UP,low) \
+ IO_LITE(UP,low), \
+ .pci_ops = &CAT(low,_pci_ops), \
+ .mv_pci_tbi = CAT(low,_pci_tbi)
+
+#define DO_APECS_IO IO(APECS,apecs)
+#define DO_CIA_IO IO(CIA,cia)
+#define DO_IRONGATE_IO IO(IRONGATE,irongate)
+#define DO_LCA_IO IO(LCA,lca)
+#define DO_MARVEL_IO IO(MARVEL,marvel)
+#define DO_MCPCIA_IO IO(MCPCIA,mcpcia)
+#define DO_POLARIS_IO IO(POLARIS,polaris)
+#define DO_T2_IO IO(T2,t2)
+#define DO_TSUNAMI_IO IO(TSUNAMI,tsunami)
+#define DO_TITAN_IO IO(TITAN,titan)
+#define DO_WILDFIRE_IO IO(WILDFIRE,wildfire)
+
+#define DO_PYXIS_IO IO_LITE(CIA,cia_bwx), \
+ .pci_ops = &cia_pci_ops, \
+ .mv_pci_tbi = cia_pci_tbi
+
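
The two-level CAT macro above is the standard token-pasting trick: the extra level forces argument expansion before ## is applied. A cut-down, compilable illustration of how IO_LITE builds machine-vector entries from a lower-case tag (the struct and stub functions here are hypothetical):

#include <stdio.h>

#define CAT1(x,y) x##y
#define CAT(x,y)  CAT1(x,y)

/* Stand-ins for two core-logic implementations. */
static void cia_ioread8(void)     { puts("cia_ioread8"); }
static void tsunami_ioread8(void) { puts("tsunami_ioread8"); }

struct mini_mv { void (*mv_ioread8)(void); };

/* A one-field IO_LITE: paste the tag into the function name. */
#define MINI_IO(low) { .mv_ioread8 = CAT(low, _ioread8) }

int main(void)
{
	struct mini_mv cia = MINI_IO(cia);
	struct mini_mv tsunami = MINI_IO(tsunami);

	cia.mv_ioread8();	/* prints "cia_ioread8" */
	tsunami.mv_ioread8();	/* prints "tsunami_ioread8" */
	return 0;
}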
+/*
+ * In a GENERIC kernel, we have lots of these vectors floating about,
+ * all but one of which we want to go away. In a non-GENERIC kernel,
+ * we want only one, ever.
+ *
+ * Accomplish this in the GENERIC kernel by putting all of the vectors
+ * in the .init.data section where they'll go away. We'll copy the
+ * one we want to the real alpha_mv vector in setup_arch.
+ *
+ * Accomplish this in a non-GENERIC kernel by ifdef'ing out all but
+ * one of the vectors, which will not reside in .init.data. We then
+ * alias this one vector to alpha_mv, so no copy is needed.
+ *
+ * Upshot: set __initdata to nothing for non-GENERIC kernels.
+ */
+
+#ifdef CONFIG_ALPHA_GENERIC
+#define __initmv __initdata
+#define ALIAS_MV(x)
+#else
+#define __initmv
+
+/* GCC actually has a syntax for defining aliases, but is under some
+ delusion that you shouldn't be able to declare it extern somewhere
+ else beforehand. Fine. We'll do it ourselves. */
+#if 0
+#define ALIAS_MV(system) \
+ struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
+#else
+#define ALIAS_MV(system) \
+ asm(".global alpha_mv\nalpha_mv = " #system "_mv");
+#endif
+#endif /* GENERIC */
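
The asm() form of ALIAS_MV defines alpha_mv as an assembler-level alias, sidestepping GCC's complaint about combining the alias attribute with a prior extern declaration. A minimal demonstration of the same trick (GCC on an ELF target assumed; the names are hypothetical):

#include <stdio.h>

struct vec { const char *name; };

struct vec cia_mv = { "cia" };

/* Assembler-level alias: alpha_mv and cia_mv are the same storage. */
asm(".global alpha_mv\nalpha_mv = cia_mv");
extern struct vec alpha_mv;

int main(void)
{
	printf("%s\n", alpha_mv.name);	/* prints "cia" */
	return 0;
}

No copy of the vector exists and no extra indirection is added; the assembler simply resolves both symbols to the same address.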
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
new file mode 100644
index 000000000000..fc271e316a38
--- /dev/null
+++ b/arch/alpha/kernel/module.c
@@ -0,0 +1,311 @@
+/* Kernel module help for Alpha.
+ Copyright (C) 2002 Richard Henderson.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+void *
+module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return vmalloc(size);
+}
+
+void
+module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+}
+
+/* Allocate the GOT at the end of the core sections. */
+
+struct got_entry {
+ struct got_entry *next;
+ Elf64_Addr r_offset;
+ int got_offset;
+};
+
+static inline void
+process_reloc_for_got(Elf64_Rela *rela,
+ struct got_entry *chains, Elf64_Xword *poffset)
+{
+ unsigned long r_sym = ELF64_R_SYM (rela->r_info);
+ unsigned long r_type = ELF64_R_TYPE (rela->r_info);
+ Elf64_Addr r_offset = rela->r_offset;
+ struct got_entry *g;
+
+ if (r_type != R_ALPHA_LITERAL)
+ return;
+
+ for (g = chains + r_sym; g ; g = g->next)
+ if (g->r_offset == r_offset) {
+ if (g->got_offset == 0) {
+ g->got_offset = *poffset;
+ *poffset += 8;
+ }
+ goto found_entry;
+ }
+
+ g = kmalloc (sizeof (*g), GFP_KERNEL);
+ g->next = chains[r_sym].next;
+ g->r_offset = r_offset;
+ g->got_offset = *poffset;
+ *poffset += 8;
+ chains[r_sym].next = g;
+
+ found_entry:
+ /* Trick: most of the ELF64_R_TYPE field is unused. There are
+ 42 valid relocation types, and a 32-bit field. Co-opt the
+ bits above the low 8 (values of 256 and up) to store the got
+ offset for this reloc. */
+ rela->r_info |= g->got_offset << 8;
+}
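
The packing trick noted above survives into apply_relocate_add, which recovers the offset with r_type >> 8 before masking r_type down to its low byte. A stand-alone check of the round trip (the values are hypothetical; R_ALPHA_LITERAL is relocation type 4):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r_info = 4;		/* low 32 bits: relocation type */
	uint64_t got_offset = 0x18;	/* some 8-byte-aligned GOT slot */

	/* Packing, as in process_reloc_for_got: */
	r_info |= got_offset << 8;

	/* Unpacking, as in apply_relocate_add: */
	uint64_t r_type = (uint32_t)r_info;	/* ELF64_R_TYPE */
	uint64_t recovered = r_type >> 8;
	r_type &= 0xff;

	assert(r_type == 4 && recovered == 0x18);
	printf("type=%llu got_offset=0x%llx\n",
	       (unsigned long long)r_type, (unsigned long long)recovered);
	return 0;
}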
+
+int
+module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
+ char *secstrings, struct module *me)
+{
+ struct got_entry *chains;
+ Elf64_Rela *rela;
+ Elf64_Shdr *esechdrs, *symtab, *s, *got;
+ unsigned long nsyms, nrela, i;
+
+ esechdrs = sechdrs + hdr->e_shnum;
+ symtab = got = NULL;
+
+ /* Find out how large the symbol table is. Allocate one got_entry
+ head per symbol. Normally this will be enough, but not always.
+ We'll chain different offsets for the symbol down each head. */
+ for (s = sechdrs; s < esechdrs; ++s)
+ if (s->sh_type == SHT_SYMTAB)
+ symtab = s;
+ else if (!strcmp(".got", secstrings + s->sh_name)) {
+ got = s;
+ me->arch.gotsecindex = s - sechdrs;
+ }
+
+ if (!symtab) {
+ printk(KERN_ERR "module %s: no symbol table\n", me->name);
+ return -ENOEXEC;
+ }
+ if (!got) {
+ printk(KERN_ERR "module %s: no got section\n", me->name);
+ return -ENOEXEC;
+ }
+
+ nsyms = symtab->sh_size / sizeof(Elf64_Sym);
+ chains = kmalloc(nsyms * sizeof(struct got_entry), GFP_KERNEL);
+ memset(chains, 0, nsyms * sizeof(struct got_entry));
+
+ got->sh_size = 0;
+ got->sh_addralign = 8;
+ got->sh_type = SHT_NOBITS;
+
+ /* Examine all LITERAL relocations to find out what GOT entries
+ are required. This sizes the GOT section as well. */
+ for (s = sechdrs; s < esechdrs; ++s)
+ if (s->sh_type == SHT_RELA) {
+ nrela = s->sh_size / sizeof(Elf64_Rela);
+ rela = (void *)hdr + s->sh_offset;
+ for (i = 0; i < nrela; ++i)
+ process_reloc_for_got(rela+i, chains,
+ &got->sh_size);
+ }
+
+ /* Free the memory we allocated. */
+ for (i = 0; i < nsyms; ++i) {
+ struct got_entry *g, *n;
+ for (g = chains[i].next; g ; g = n) {
+ n = g->next;
+ kfree(g);
+ }
+ }
+ kfree(chains);
+
+ return 0;
+}
+
+int
+apply_relocate(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+ unsigned int relsec, struct module *me)
+{
+ printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
+ return -ENOEXEC;
+}
+
+int
+apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela);
+ Elf64_Sym *symtab, *sym;
+ void *base, *location;
+ unsigned long got, gp;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+ gp = (u64)me->module_core + me->core_size - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+ unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
+ unsigned long r_type = ELF64_R_TYPE (rela[i].r_info);
+ unsigned long r_got_offset = r_type >> 8;
+ unsigned long value, hi, lo;
+ r_type &= 0xff;
+
+ /* This is where to make the change. */
+ location = base + rela[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ unresolved symbols have been resolved. */
+ sym = symtab + r_sym;
+ value = sym->st_value + rela[i].r_addend;
+
+ switch (r_type) {
+ case R_ALPHA_NONE:
+ break;
+ case R_ALPHA_REFQUAD:
+ /* BUG() can produce misaligned relocations. */
+ ((u32 *)location)[0] = value;
+ ((u32 *)location)[1] = value >> 32;
+ break;
+ case R_ALPHA_GPREL32:
+ value -= gp;
+ if ((int)value != value)
+ goto reloc_overflow;
+ *(u32 *)location = value;
+ break;
+ case R_ALPHA_LITERAL:
+ hi = got + r_got_offset;
+ lo = hi - gp;
+ if ((short)lo != lo)
+ goto reloc_overflow;
+ *(u16 *)location = lo;
+ *(u64 *)hi = value;
+ break;
+ case R_ALPHA_LITUSE:
+ break;
+ case R_ALPHA_GPDISP:
+ value = gp - (u64)location;
+ lo = (short)value;
+ hi = (int)(value - lo);
+ if (hi + lo != value)
+ goto reloc_overflow;
+ *(u16 *)location = hi >> 16;
+ *(u16 *)(location + rela[i].r_addend) = lo;
+ break;
+ case R_ALPHA_BRSGP:
+ /* BRSGP is only allowed to bind to local symbols.
+ If the section is undef, this means that the
+ value was resolved from somewhere else. */
+ if (sym->st_shndx == SHN_UNDEF)
+ goto reloc_overflow;
+ if ((sym->st_other & STO_ALPHA_STD_GPLOAD) ==
+ STO_ALPHA_STD_GPLOAD)
+ /* Omit the prologue. */
+ value += 8;
+ /* FALLTHRU */
+ case R_ALPHA_BRADDR:
+ value -= (u64)location + 4;
+ if (value & 3)
+ goto reloc_overflow;
+ value = (long)value >> 2;
+ if (value + (1<<21) >= 1<<22)
+ goto reloc_overflow;
+ value &= 0x1fffff;
+ value |= *(u32 *)location & ~0x1fffff;
+ *(u32 *)location = value;
+ break;
+ case R_ALPHA_HINT:
+ break;
+ case R_ALPHA_SREL32:
+ value -= (u64)location;
+ if ((int)value != value)
+ goto reloc_overflow;
+ *(u32 *)location = value;
+ break;
+ case R_ALPHA_SREL64:
+ value -= (u64)location;
+ *(u64 *)location = value;
+ break;
+ case R_ALPHA_GPRELHIGH:
+ value = (long)(value - gp + 0x8000) >> 16;
+ if ((short) value != value)
+ goto reloc_overflow;
+ *(u16 *)location = value;
+ break;
+ case R_ALPHA_GPRELLOW:
+ value -= gp;
+ *(u16 *)location = value;
+ break;
+ case R_ALPHA_GPREL16:
+ value -= gp;
+ if ((short) value != value)
+ goto reloc_overflow;
+ *(u16 *)location = value;
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %lu\n",
+ me->name, r_type);
+ return -ENOEXEC;
+ reloc_overflow:
+ if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION)
+ printk(KERN_ERR
+ "module %s: Relocation overflow vs section %d\n",
+ me->name, sym->st_shndx);
+ else
+ printk(KERN_ERR
+ "module %s: Relocation overflow vs %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
+
+int
+module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ return 0;
+}
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
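
Of the relocations handled above, R_ALPHA_GPDISP has the subtlest arithmetic: the 32-bit displacement is split so that lo is the sign-extended low 16 bits and hi absorbs the resulting borrow, letting an ldah/lda pair reconstruct the value. A stand-alone check of the split (hypothetical displacements):

#include <assert.h>
#include <stdio.h>

/* Split a displacement as the R_ALPHA_GPDISP case does. */
static void gpdisp_split(long value, long *hi, long *lo)
{
	*lo = (short)value;		/* sign-extend low 16 bits */
	*hi = (int)(value - *lo);	/* a multiple of 0x10000 */
}

int main(void)
{
	long hi, lo;

	gpdisp_split(0x12345678, &hi, &lo);
	/* Bit 15 of the low half is clear, so lo stays positive. */
	assert(lo == 0x5678 && hi == 0x12340000 && hi + lo == 0x12345678);

	gpdisp_split(0x1234ABCD, &hi, &lo);
	/* 0xABCD sign-extends negative; hi is bumped by 0x10000. */
	assert(lo == (short)0xABCD && hi == 0x12350000);
	assert(hi + lo == 0x1234ABCD);

	printf("GPDISP split OK\n");
	return 0;
}

The loader then stores hi >> 16 into the ldah instruction at location and lo into the lda instruction at location + r_addend.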
diff --git a/arch/alpha/kernel/ns87312.c b/arch/alpha/kernel/ns87312.c
new file mode 100644
index 000000000000..342b56d24c20
--- /dev/null
+++ b/arch/alpha/kernel/ns87312.c
@@ -0,0 +1,38 @@
+/*
+ * linux/arch/alpha/kernel/ns87312.c
+ */
+
+#include <linux/init.h>
+#include <asm/io.h>
+#include "proto.h"
+
+
+/*
+ * The SRM console *disables* the IDE interface; this code ensures that
+ * it is enabled.
+ *
+ * This code bangs on a control register of the 87312 Super I/O chip
+ * that implements parallel port/serial ports/IDE/FDI. Depending on
+ * the motherboard, the Super I/O chip can be configured through a
+ * pair of registers that are located either at I/O ports 0x26e/0x26f
+ * or 0x398/0x399. Unfortunately, autodetecting which base address is
+ * in use works only once (right after a reset). The Super I/O chip
+ * has the additional quirk that configuration register data must be
+ * written twice (I believe this is a safety feature to prevent
+ * accidental modification---fun, isn't it?).
+ */
+
+void __init
+ns87312_enable_ide(long ide_base)
+{
+ int data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ outb(0, ide_base); /* set the index register for reg #0 */
+ data = inb(ide_base+1); /* read the current contents */
+ outb(0, ide_base); /* set the index register for reg #0 */
+ outb(data | 0x40, ide_base+1); /* turn on IDE */
+ outb(data | 0x40, ide_base+1); /* turn on IDE, really! */
+ local_irq_restore(flags);
+}
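
The sequence above, writing the register index to the base port and then accessing data at base+1, is the usual Super I/O configuration protocol, with the 87312's write-twice quirk on top. A hedged user-space sketch of the pattern (the port array and rd/wr helpers are stand-ins for real inb/outb, so the quirk is only documented, not modeled):

/* Stand-ins for real port I/O. */
static unsigned char ports[0x400];
static unsigned char rd(unsigned long port)         { return ports[port]; }
static void wr(unsigned char v, unsigned long port) { ports[port] = v; }

/* Read a config register through the index/data pair. */
static unsigned char sio_read(unsigned long base, int reg)
{
	wr(reg, base);		/* select register */
	return rd(base + 1);	/* read its contents */
}

/* Write one, honoring the 87312's write-twice requirement. */
static void sio_write(unsigned long base, int reg, unsigned char val)
{
	wr(reg, base);		/* select register */
	wr(val, base + 1);	/* first write arms the update... */
	wr(val, base + 1);	/* ...second write commits it */
}

int main(void)
{
	/* What ns87312_enable_ide does, minus the irq save/restore. */
	sio_write(0x26e, 0, sio_read(0x26e, 0) | 0x40);	/* turn on IDE */
	return sio_read(0x26e, 0) == 0x40 ? 0 : 1;
}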
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
new file mode 100644
index 000000000000..b5d0fd2bb10a
--- /dev/null
+++ b/arch/alpha/kernel/osf_sys.c
@@ -0,0 +1,1345 @@
+/*
+ * linux/arch/alpha/kernel/osf_sys.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This file handles some of the stranger OSF/1 system call interfaces.
+ * Some of the system calls expect a non-C calling standard, others have
+ * special parameter blocks..
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/types.h>
+#include <linux/ipc.h>
+#include <linux/namei.h>
+#include <linux/uio.h>
+#include <linux/vfs.h>
+
+#include <asm/fpu.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/sysinfo.h>
+#include <asm/hwrpb.h>
+#include <asm/processor.h>
+
+extern int do_pipe(int *);
+
+/*
+ * Brk needs to return an error. Still support Linux's brk(0) query idiom,
+ * which OSF programs just shouldn't be doing. We're still not quite
+ * identical to OSF as we don't return 0 on success, but doing otherwise
+ * would require changes to libc. Hopefully this is good enough.
+ */
+asmlinkage unsigned long
+osf_brk(unsigned long brk)
+{
+ unsigned long retval = sys_brk(brk);
+ if (brk && brk != retval)
+ retval = -ENOMEM;
+ return retval;
+}
+
+/*
+ * This is pure guess-work..
+ */
+asmlinkage int
+osf_set_program_attributes(unsigned long text_start, unsigned long text_len,
+ unsigned long bss_start, unsigned long bss_len)
+{
+ struct mm_struct *mm;
+
+ lock_kernel();
+ mm = current->mm;
+ mm->end_code = bss_start + bss_len;
+ mm->brk = bss_start + bss_len;
+#if 0
+ printk("set_program_attributes(%lx %lx %lx %lx)\n",
+ text_start, text_len, bss_start, bss_len);
+#endif
+ unlock_kernel();
+ return 0;
+}
+
+/*
+ * OSF/1 directory handling functions...
+ *
+ * The "getdents()" interface is much more sane: the "basep" stuff is
+ * braindamage (it can't really handle filesystems where the directory
+ * offset differences aren't the same as "d_reclen").
+ */
+#define NAME_OFFSET offsetof (struct osf_dirent, d_name)
+#define ROUND_UP(x) (((x)+3) & ~3)
+
+struct osf_dirent {
+ unsigned int d_ino;
+ unsigned short d_reclen;
+ unsigned short d_namlen;
+ char d_name[1];
+};
+
+struct osf_dirent_callback {
+ struct osf_dirent __user *dirent;
+ long __user *basep;
+ unsigned int count;
+ int error;
+};
+
+static int
+osf_filldir(void *__buf, const char *name, int namlen, loff_t offset,
+ ino_t ino, unsigned int d_type)
+{
+ struct osf_dirent __user *dirent;
+ struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf;
+ unsigned int reclen = ROUND_UP(NAME_OFFSET + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail */
+ if (reclen > buf->count)
+ return -EINVAL;
+ if (buf->basep) {
+ if (put_user(offset, buf->basep))
+ return -EFAULT;
+ buf->basep = NULL;
+ }
+ dirent = buf->dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ if (copy_to_user(dirent->d_name, name, namlen) ||
+ put_user(0, dirent->d_name + namlen))
+ return -EFAULT;
+ dirent = (void __user *)dirent + reclen;
+ buf->dirent = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage int
+osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent,
+ unsigned int count, long __user *basep)
+{
+ int error;
+ struct file *file;
+ struct osf_dirent_callback buf;
+
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.dirent = dirent;
+ buf.basep = basep;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(file, osf_filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+
+ error = buf.error;
+ if (count != buf.count)
+ error = count - buf.count;
+
+ out_putf:
+ fput(file);
+ out:
+ return error;
+}
+
+#undef ROUND_UP
+#undef NAME_OFFSET
+
+asmlinkage unsigned long
+osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long fd, unsigned long off)
+{
+ struct file *file = NULL;
+ unsigned long ret = -EBADF;
+
+#if 0
+ if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
+ printk("%s: unimplemented OSF mmap flags %04lx\n",
+ current->comm, flags);
+#endif
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap(file, addr, len, prot, flags, off);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+ out:
+ return ret;
+}
+
+
+/*
+ * The OSF/1 statfs structure is much larger, but this should
+ * match the beginning, at least.
+ */
+struct osf_statfs {
+ short f_type;
+ short f_flags;
+ int f_fsize;
+ int f_bsize;
+ int f_blocks;
+ int f_bfree;
+ int f_bavail;
+ int f_files;
+ int f_ffree;
+ __kernel_fsid_t f_fsid;
+};
+
+static int
+linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
+ unsigned long bufsiz)
+{
+ struct osf_statfs tmp_stat;
+
+ tmp_stat.f_type = linux_stat->f_type;
+ tmp_stat.f_flags = 0; /* mount flags */
+ tmp_stat.f_fsize = linux_stat->f_frsize;
+ tmp_stat.f_bsize = linux_stat->f_bsize;
+ tmp_stat.f_blocks = linux_stat->f_blocks;
+ tmp_stat.f_bfree = linux_stat->f_bfree;
+ tmp_stat.f_bavail = linux_stat->f_bavail;
+ tmp_stat.f_files = linux_stat->f_files;
+ tmp_stat.f_ffree = linux_stat->f_ffree;
+ tmp_stat.f_fsid = linux_stat->f_fsid;
+ if (bufsiz > sizeof(tmp_stat))
+ bufsiz = sizeof(tmp_stat);
+ return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
+}
+
+static int
+do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer,
+ unsigned long bufsiz)
+{
+ struct kstatfs linux_stat;
+ int error = vfs_statfs(dentry->d_inode->i_sb, &linux_stat);
+ if (!error)
+ error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
+ return error;
+}
+
+asmlinkage int
+osf_statfs(char __user *path, struct osf_statfs __user *buffer, unsigned long bufsiz)
+{
+ struct nameidata nd;
+ int retval;
+
+ retval = user_path_walk(path, &nd);
+ if (!retval) {
+ retval = do_osf_statfs(nd.dentry, buffer, bufsiz);
+ path_release(&nd);
+ }
+ return retval;
+}
+
+asmlinkage int
+osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz)
+{
+ struct file *file;
+ int retval;
+
+ retval = -EBADF;
+ file = fget(fd);
+ if (file) {
+ retval = do_osf_statfs(file->f_dentry, buffer, bufsiz);
+ fput(file);
+ }
+ return retval;
+}
+
+/*
+ * Uhh.. OSF/1 mount parameters aren't exactly obvious..
+ *
+ * Although to be frank, neither are the native Linux/i386 ones..
+ */
+struct ufs_args {
+ char __user *devname;
+ int flags;
+ uid_t exroot;
+};
+
+struct cdfs_args {
+ char __user *devname;
+ int flags;
+ uid_t exroot;
+
+ /* This has lots more here, which Linux handles with the option block
+ but I'm too lazy to do the translation into ASCII. */
+};
+
+struct procfs_args {
+ char __user *devname;
+ int flags;
+ uid_t exroot;
+};
+
+/*
+ * We can't actually handle ufs yet, so we translate UFS mounts to
+ * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS
+ * layout is so braindead it's a major headache doing it.
+ *
+ * Just how long ago was it written? OTOH our UFS driver may still be
+ * unhappy with OSF UFS. [CHECKME]
+ */
+static int
+osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
+{
+ int retval;
+ struct cdfs_args tmp;
+ char *devname;
+
+ retval = -EFAULT;
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ goto out;
+ devname = getname(tmp.devname);
+ retval = PTR_ERR(devname);
+ if (IS_ERR(devname))
+ goto out;
+ retval = do_mount(devname, dirname, "ext2", flags, NULL);
+ putname(devname);
+ out:
+ return retval;
+}
+
+static int
+osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
+{
+ int retval;
+ struct cdfs_args tmp;
+ char *devname;
+
+ retval = -EFAULT;
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ goto out;
+ devname = getname(tmp.devname);
+ retval = PTR_ERR(devname);
+ if (IS_ERR(devname))
+ goto out;
+ retval = do_mount(devname, dirname, "iso9660", flags, NULL);
+ putname(devname);
+ out:
+ return retval;
+}
+
+static int
+osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
+{
+ struct procfs_args tmp;
+
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ return -EFAULT;
+
+ return do_mount("", dirname, "proc", flags, NULL);
+}
+
+asmlinkage int
+osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data)
+{
+ int retval = -EINVAL;
+ char *name;
+
+ lock_kernel();
+
+ name = getname(path);
+ retval = PTR_ERR(name);
+ if (IS_ERR(name))
+ goto out;
+ switch (typenr) {
+ case 1:
+ retval = osf_ufs_mount(name, data, flag);
+ break;
+ case 6:
+ retval = osf_cdfs_mount(name, data, flag);
+ break;
+ case 9:
+ retval = osf_procfs_mount(name, data, flag);
+ break;
+ default:
+ printk("osf_mount(%ld, %x)\n", typenr, flag);
+ }
+ putname(name);
+ out:
+ unlock_kernel();
+ return retval;
+}
+
+asmlinkage int
+osf_utsname(char __user *name)
+{
+ int error;
+
+ down_read(&uts_sem);
+ error = -EFAULT;
+ if (copy_to_user(name + 0, system_utsname.sysname, 32))
+ goto out;
+ if (copy_to_user(name + 32, system_utsname.nodename, 32))
+ goto out;
+ if (copy_to_user(name + 64, system_utsname.release, 32))
+ goto out;
+ if (copy_to_user(name + 96, system_utsname.version, 32))
+ goto out;
+ if (copy_to_user(name + 128, system_utsname.machine, 32))
+ goto out;
+
+ error = 0;
+ out:
+ up_read(&uts_sem);
+ return error;
+}
+
+asmlinkage unsigned long
+sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
+
+asmlinkage unsigned long
+sys_getdtablesize(void)
+{
+ return NR_OPEN;
+}
+
+/*
+ * For compatibility with OSF/1 only. Use utsname(2) instead.
+ */
+asmlinkage int
+osf_getdomainname(char __user *name, int namelen)
+{
+ unsigned len;
+ int i;
+
+ if (!access_ok(VERIFY_WRITE, name, namelen))
+ return -EFAULT;
+
+ len = namelen;
+ if (namelen > 32)
+ len = 32;
+
+ down_read(&uts_sem);
+ for (i = 0; i < len; ++i) {
+ __put_user(system_utsname.domainname[i], name + i);
+ if (system_utsname.domainname[i] == '\0')
+ break;
+ }
+ up_read(&uts_sem);
+
+ return 0;
+}
+
+asmlinkage long
+osf_shmat(int shmid, void __user *shmaddr, int shmflg)
+{
+ unsigned long raddr;
+ long err;
+
+ err = do_shmat(shmid, shmaddr, shmflg, &raddr);
+
+ /*
+ * This works because all user-level addresses are
+ * non-negative longs!
+ */
+ return err ? err : (long)raddr;
+}
+
+
+/*
+ * The following stuff should move into a header file should it ever
+ * be labeled "officially supported." Right now, there is just enough
+ * support to avoid applications (such as tar) printing error
+ * messages. The attributes are not really implemented.
+ */
+
+/*
+ * Values for Property list entry flag
+ */
+#define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry
+ by default */
+#define PLE_FLAG_MASK 0x1 /* Valid flag values */
+#define PLE_FLAG_ALL -1 /* All flag value */
+
+struct proplistname_args {
+ unsigned int pl_mask;
+ unsigned int pl_numnames;
+ char **pl_names;
+};
+
+union pl_args {
+ struct setargs {
+ char __user *path;
+ long follow;
+ long nbytes;
+ char __user *buf;
+ } set;
+ struct fsetargs {
+ long fd;
+ long nbytes;
+ char __user *buf;
+ } fset;
+ struct getargs {
+ char __user *path;
+ long follow;
+ struct proplistname_args __user *name_args;
+ long nbytes;
+ char __user *buf;
+ int __user *min_buf_size;
+ } get;
+ struct fgetargs {
+ long fd;
+ struct proplistname_args __user *name_args;
+ long nbytes;
+ char __user *buf;
+ int __user *min_buf_size;
+ } fget;
+ struct delargs {
+ char __user *path;
+ long follow;
+ struct proplistname_args __user *name_args;
+ } del;
+ struct fdelargs {
+ long fd;
+ struct proplistname_args __user *name_args;
+ } fdel;
+};
+
+enum pl_code {
+ PL_SET = 1, PL_FSET = 2,
+ PL_GET = 3, PL_FGET = 4,
+ PL_DEL = 5, PL_FDEL = 6
+};
+
+asmlinkage long
+osf_proplist_syscall(enum pl_code code, union pl_args __user *args)
+{
+ long error;
+ int __user *min_buf_size_ptr;
+
+ lock_kernel();
+ switch (code) {
+ case PL_SET:
+ if (get_user(error, &args->set.nbytes))
+ error = -EFAULT;
+ break;
+ case PL_FSET:
+ if (get_user(error, &args->fset.nbytes))
+ error = -EFAULT;
+ break;
+ case PL_GET:
+ error = get_user(min_buf_size_ptr, &args->get.min_buf_size);
+ if (error)
+ break;
+ error = put_user(0, min_buf_size_ptr);
+ break;
+ case PL_FGET:
+ error = get_user(min_buf_size_ptr, &args->fget.min_buf_size);
+ if (error)
+ break;
+ error = put_user(0, min_buf_size_ptr);
+ break;
+ case PL_DEL:
+ case PL_FDEL:
+ error = 0;
+ break;
+ default:
+ error = -EOPNOTSUPP;
+ break;
+ }
+ unlock_kernel();
+ return error;
+}
+
+asmlinkage int
+osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss)
+{
+ unsigned long usp = rdusp();
+ unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size;
+ unsigned long oss_os = on_sig_stack(usp);
+ int error;
+
+ if (uss) {
+ void __user *ss_sp;
+
+ error = -EFAULT;
+ if (get_user(ss_sp, &uss->ss_sp))
+ goto out;
+
+ /* If the current stack was set with sigaltstack, don't
+ swap stacks while we are on it. */
+ error = -EPERM;
+ if (current->sas_ss_sp && on_sig_stack(usp))
+ goto out;
+
+ /* Since we don't know the extent of the stack, and we don't
+ track onstack-ness, but rather calculate it, we must
+ presume a size. Ho hum this interface is lossy. */
+ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
+ current->sas_ss_size = SIGSTKSZ;
+ }
+
+ if (uoss) {
+ error = -EFAULT;
+ if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))
+ || __put_user(oss_sp, &uoss->ss_sp)
+ || __put_user(oss_os, &uoss->ss_onstack))
+ goto out;
+ }
+
+ error = 0;
+ out:
+ return error;
+}
+
+asmlinkage long
+osf_sysinfo(int command, char __user *buf, long count)
+{
+ static char * sysinfo_table[] = {
+ system_utsname.sysname,
+ system_utsname.nodename,
+ system_utsname.release,
+ system_utsname.version,
+ system_utsname.machine,
+ "alpha", /* instruction set architecture */
+ "dummy", /* hardware serial number */
+ "dummy", /* hardware manufacturer */
+ "dummy", /* secure RPC domain */
+ };
+ unsigned long offset;
+ char *res;
+ long len, err = -EINVAL;
+
+ offset = command-1;
+ if (offset >= sizeof(sysinfo_table)/sizeof(char *)) {
+ /* Digital UNIX has a few unpublished interfaces here */
+ printk("sysinfo(%d)", command);
+ goto out;
+ }
+
+ down_read(&uts_sem);
+ res = sysinfo_table[offset];
+ len = strlen(res)+1;
+ if (len > count)
+ len = count;
+ if (copy_to_user(buf, res, len))
+ err = -EFAULT;
+ else
+ err = 0;
+ up_read(&uts_sem);
+ out:
+ return err;
+}
+
+asmlinkage unsigned long
+osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
+ int __user *start, void __user *arg)
+{
+ unsigned long w;
+ struct percpu_struct *cpu;
+
+ switch (op) {
+ case GSI_IEEE_FP_CONTROL:
+ /* Return current software fp control & status bits. */
+ /* Note that DU doesn't verify available space here. */
+
+ w = current_thread_info()->ieee_state & IEEE_SW_MASK;
+ w = swcr_update_status(w, rdfpcr());
+ if (put_user(w, (unsigned long __user *) buffer))
+ return -EFAULT;
+ return 0;
+
+ case GSI_IEEE_STATE_AT_SIGNAL:
+ /*
+ * Not sure anybody will ever use this weird stuff. These
+ * ops can be used (under OSF/1) to set the fpcr that should
+ * be used when a signal handler starts executing.
+ */
+ break;
+
+ case GSI_UACPROC:
+ if (nbytes < sizeof(unsigned int))
+ return -EINVAL;
+ w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK;
+ if (put_user(w, (unsigned int __user *)buffer))
+ return -EFAULT;
+ return 1;
+
+ case GSI_PROC_TYPE:
+ if (nbytes < sizeof(unsigned long))
+ return -EINVAL;
+ cpu = (struct percpu_struct*)
+ ((char*)hwrpb + hwrpb->processor_offset);
+ w = cpu->type;
+ if (put_user(w, (unsigned long __user*)buffer))
+ return -EFAULT;
+ return 1;
+
+ case GSI_GET_HWRPB:
+ if (nbytes < sizeof(*hwrpb))
+ return -EINVAL;
+ if (copy_to_user(buffer, hwrpb, nbytes) != 0)
+ return -EFAULT;
+ return 1;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+asmlinkage unsigned long
+osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
+ int __user *start, void __user *arg)
+{
+ switch (op) {
+ case SSI_IEEE_FP_CONTROL: {
+ unsigned long swcr, fpcr;
+ unsigned int *state;
+
+ /*
+ * Alpha Architecture Handbook 4.7.7.3:
+ * To be fully IEEE compliant, we must track the current IEEE
+ * exception state in software, because spurious bits can be
+ * set in the trap shadow of a software-complete insn.
+ */
+
+ if (get_user(swcr, (unsigned long __user *)buffer))
+ return -EFAULT;
+ state = &current_thread_info()->ieee_state;
+
+ /* Update software trap enable bits. */
+ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
+
+ /* Update the real fpcr. */
+ fpcr = rdfpcr() & FPCR_DYN_MASK;
+ fpcr |= ieee_swcr_to_fpcr(swcr);
+ wrfpcr(fpcr);
+
+ return 0;
+ }
+
+ case SSI_IEEE_RAISE_EXCEPTION: {
+ unsigned long exc, swcr, fpcr, fex;
+ unsigned int *state;
+
+ if (get_user(exc, (unsigned long __user *)buffer))
+ return -EFAULT;
+ state = &current_thread_info()->ieee_state;
+ exc &= IEEE_STATUS_MASK;
+
+ /* Update software trap enable bits. */
+ swcr = (*state & IEEE_SW_MASK) | exc;
+ *state |= exc;
+
+ /* Update the real fpcr. */
+ fpcr = rdfpcr();
+ fpcr |= ieee_swcr_to_fpcr(swcr);
+ wrfpcr(fpcr);
+
+ /* If any exceptions set by this call, and are unmasked,
+ send a signal. Old exceptions are not signaled. */
+ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
+ if (fex) {
+ siginfo_t info;
+ int si_code = 0;
+
+ if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND;
+ if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES;
+ if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND;
+ if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF;
+ if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV;
+ if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV;
+
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = NULL; /* FIXME */
+ send_sig_info(SIGFPE, &info, current);
+ }
+ return 0;
+ }
+
+ case SSI_IEEE_STATE_AT_SIGNAL:
+ case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
+ /*
+ * Not sure anybody will ever use this weird stuff. These
+ * ops can be used (under OSF/1) to set the fpcr that should
+ * be used when a signal handler starts executing.
+ */
+ break;
+
+ case SSI_NVPAIRS: {
+ unsigned long v, w, i;
+ unsigned int old, new;
+
+ for (i = 0; i < nbytes; ++i) {
+
+ if (get_user(v, 2*i + (unsigned int __user *)buffer))
+ return -EFAULT;
+ if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
+ return -EFAULT;
+ switch (v) {
+ case SSIN_UACPROC:
+ again:
+ old = current_thread_info()->flags;
+ new = old & ~(UAC_BITMASK << UAC_SHIFT);
+ new = new | (w & UAC_BITMASK) << UAC_SHIFT;
+ if (cmpxchg(&current_thread_info()->flags,
+ old, new) != old)
+ goto again;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+ }
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Translations due to the fact that OSF's time_t is an int, which
+   affects all sorts of things, like timeval and itimerval. */
+
+extern struct timezone sys_tz;
+extern int do_adjtimex(struct timex *);
+
+struct timeval32
+{
+ int tv_sec, tv_usec;
+};
+
+struct itimerval32
+{
+ struct timeval32 it_interval;
+ struct timeval32 it_value;
+};
+
+static inline long
+get_tv32(struct timeval *o, struct timeval32 __user *i)
+{
+ return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+ (__get_user(o->tv_sec, &i->tv_sec) |
+ __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+static inline long
+put_tv32(struct timeval32 __user *o, struct timeval *i)
+{
+ return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+ (__put_user(i->tv_sec, &o->tv_sec) |
+ __put_user(i->tv_usec, &o->tv_usec)));
+}
+
+static inline long
+get_it32(struct itimerval *o, struct itimerval32 __user *i)
+{
+ return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+ (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
+ __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
+ __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
+ __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
+}
+
+static inline long
+put_it32(struct itimerval32 __user *o, struct itimerval *i)
+{
+ return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+ (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
+ __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
+ __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
+ __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
+}
+
+static inline void
+jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value)
+{
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
+asmlinkage int
+osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+{
+ if (tv) {
+ struct timeval ktv;
+ do_gettimeofday(&ktv);
+ if (put_tv32(tv, &ktv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+asmlinkage int
+osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+{
+ struct timespec kts;
+ struct timezone ktz;
+
+ if (tv) {
+ if (get_tv32((struct timeval *)&kts, tv))
+ return -EFAULT;
+ kts.tv_nsec *= 1000; /* get_tv32 stored microseconds here */
+ }
+ if (tz) {
+ if (copy_from_user(&ktz, tz, sizeof(*tz)))
+ return -EFAULT;
+ }
+
+ return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
+asmlinkage int
+osf_getitimer(int which, struct itimerval32 __user *it)
+{
+ struct itimerval kit;
+ int error;
+
+ error = do_getitimer(which, &kit);
+ if (!error && put_it32(it, &kit))
+ error = -EFAULT;
+
+ return error;
+}
+
+asmlinkage int
+osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out)
+{
+ struct itimerval kin, kout;
+ int error;
+
+ if (in) {
+ if (get_it32(&kin, in))
+ return -EFAULT;
+ } else
+ memset(&kin, 0, sizeof(kin));
+
+ error = do_setitimer(which, &kin, out ? &kout : NULL);
+ if (error || !out)
+ return error;
+
+ if (put_it32(out, &kout))
+ return -EFAULT;
+
+ return 0;
+
+}
+
+asmlinkage int
+osf_utimes(char __user *filename, struct timeval32 __user *tvs)
+{
+ struct timeval ktvs[2];
+
+ if (tvs) {
+ if (get_tv32(&ktvs[0], &tvs[0]) ||
+ get_tv32(&ktvs[1], &tvs[1]))
+ return -EFAULT;
+ }
+
+ return do_utimes(filename, tvs ? ktvs : NULL);
+}
+
+#define MAX_SELECT_SECONDS \
+ ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
+
+asmlinkage int
+osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
+ struct timeval32 __user *tvp)
+{
+ fd_set_bits fds;
+ char *bits;
+ size_t size;
+ long timeout;
+ int ret = -EINVAL;
+
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (tvp) {
+ time_t sec, usec;
+
+ if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
+ || __get_user(sec, &tvp->tv_sec)
+ || __get_user(usec, &tvp->tv_usec)) {
+ ret = -EFAULT;
+ goto out_nofds;
+ }
+
+ if (sec < 0 || usec < 0)
+ goto out_nofds;
+
+ if ((unsigned long) sec < MAX_SELECT_SECONDS) {
+ timeout = (usec + 1000000/HZ - 1) / (1000000/HZ);
+ timeout += sec * (unsigned long) HZ;
+ }
+ }
+
+ if (n < 0 || n > current->files->max_fdset)
+ goto out_nofds;
+
+ /*
+ * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
+ * and since we used fd_set we need to allocate memory in units of
+ * long-words.
+ */
+ ret = -ENOMEM;
+ size = FDS_BYTES(n);
+ bits = kmalloc(6 * size, GFP_KERNEL);
+ if (!bits)
+ goto out_nofds;
+ fds.in = (unsigned long *) bits;
+ fds.out = (unsigned long *) (bits + size);
+ fds.ex = (unsigned long *) (bits + 2*size);
+ fds.res_in = (unsigned long *) (bits + 3*size);
+ fds.res_out = (unsigned long *) (bits + 4*size);
+ fds.res_ex = (unsigned long *) (bits + 5*size);
+
+ if ((ret = get_fd_set(n, inp->fds_bits, fds.in)) ||
+ (ret = get_fd_set(n, outp->fds_bits, fds.out)) ||
+ (ret = get_fd_set(n, exp->fds_bits, fds.ex)))
+ goto out;
+ zero_fd_set(n, fds.res_in);
+ zero_fd_set(n, fds.res_out);
+ zero_fd_set(n, fds.res_ex);
+
+ ret = do_select(n, &fds, &timeout);
+
+ /* OSF does not copy back the remaining time. */
+
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ ret = -ERESTARTNOHAND;
+ if (signal_pending(current))
+ goto out;
+ ret = 0;
+ }
+
+ if (set_fd_set(n, inp->fds_bits, fds.res_in) ||
+ set_fd_set(n, outp->fds_bits, fds.res_out) ||
+ set_fd_set(n, exp->fds_bits, fds.res_ex))
+ ret = -EFAULT;
+
+ out:
+ kfree(bits);
+ out_nofds:
+ return ret;
+}
+
+struct rusage32 {
+ struct timeval32 ru_utime; /* user time used */
+ struct timeval32 ru_stime; /* system time used */
+ long ru_maxrss; /* maximum resident set size */
+ long ru_ixrss; /* integral shared memory size */
+ long ru_idrss; /* integral unshared data size */
+ long ru_isrss; /* integral unshared stack size */
+ long ru_minflt; /* page reclaims */
+ long ru_majflt; /* page faults */
+ long ru_nswap; /* swaps */
+ long ru_inblock; /* block input operations */
+ long ru_oublock; /* block output operations */
+ long ru_msgsnd; /* messages sent */
+ long ru_msgrcv; /* messages received */
+ long ru_nsignals; /* signals received */
+ long ru_nvcsw; /* voluntary context switches */
+ long ru_nivcsw; /* involuntary " */
+};
+
+asmlinkage int
+osf_getrusage(int who, struct rusage32 __user *ru)
+{
+ struct rusage32 r;
+
+ if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
+ return -EINVAL;
+
+ memset(&r, 0, sizeof(r));
+ switch (who) {
+ case RUSAGE_SELF:
+ jiffies_to_timeval32(current->utime, &r.ru_utime);
+ jiffies_to_timeval32(current->stime, &r.ru_stime);
+ r.ru_minflt = current->min_flt;
+ r.ru_majflt = current->maj_flt;
+ break;
+ case RUSAGE_CHILDREN:
+ jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
+ jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
+ r.ru_minflt = current->signal->cmin_flt;
+ r.ru_majflt = current->signal->cmaj_flt;
+ break;
+ }
+
+ return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
+}
+
+asmlinkage long
+osf_wait4(pid_t pid, int __user *ustatus, int options,
+ struct rusage32 __user *ur)
+{
+ struct rusage r;
+ long ret, err;
+ mm_segment_t old_fs;
+
+ if (!ur)
+ return sys_wait4(pid, ustatus, options, NULL);
+
+ old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+ ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
+ set_fs (old_fs);
+
+ if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
+ return -EFAULT;
+
+ err = 0;
+ err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
+ err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
+ err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
+ err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec);
+ err |= __put_user(r.ru_maxrss, &ur->ru_maxrss);
+ err |= __put_user(r.ru_ixrss, &ur->ru_ixrss);
+ err |= __put_user(r.ru_idrss, &ur->ru_idrss);
+ err |= __put_user(r.ru_isrss, &ur->ru_isrss);
+ err |= __put_user(r.ru_minflt, &ur->ru_minflt);
+ err |= __put_user(r.ru_majflt, &ur->ru_majflt);
+ err |= __put_user(r.ru_nswap, &ur->ru_nswap);
+ err |= __put_user(r.ru_inblock, &ur->ru_inblock);
+ err |= __put_user(r.ru_oublock, &ur->ru_oublock);
+ err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd);
+ err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv);
+ err |= __put_user(r.ru_nsignals, &ur->ru_nsignals);
+ err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw);
+ err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw);
+
+ return err ? err : ret;
+}
+
+/*
+ * I don't know what the parameters are: the first one
+ * seems to be a timeval pointer, and I suspect the second
+ * one is the time remaining.. Ho humm.. No documentation.
+ */
+asmlinkage int
+osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain)
+{
+ struct timeval tmp;
+ unsigned long ticks;
+
+ if (get_tv32(&tmp, sleep))
+ goto fault;
+
+ ticks = tmp.tv_usec;
+ ticks = (ticks + (1000000 / HZ) - 1) / (1000000 / HZ);
+ ticks += tmp.tv_sec * HZ;
+
+ current->state = TASK_INTERRUPTIBLE;
+ ticks = schedule_timeout(ticks);
+
+ if (remain) {
+ tmp.tv_sec = ticks / HZ;
+ tmp.tv_usec = (ticks % HZ) * (1000000 / HZ); /* jiffies -> usec */
+ if (put_tv32(remain, &tmp))
+ goto fault;
+ }
+
+ return 0;
+ fault:
+ return -EFAULT;
+}
+
+
+struct timex32 {
+ unsigned int modes; /* mode selector */
+ long offset; /* time offset (usec) */
+ long freq; /* frequency offset (scaled ppm) */
+ long maxerror; /* maximum error (usec) */
+ long esterror; /* estimated error (usec) */
+ int status; /* clock command/status */
+ long constant; /* pll time constant */
+ long precision; /* clock precision (usec) (read only) */
+ long tolerance; /* clock frequency tolerance (ppm)
+ * (read only)
+ */
+ struct timeval32 time; /* (read only) */
+ long tick; /* (modified) usecs between clock ticks */
+
+ long ppsfreq; /* pps frequency (scaled ppm) (ro) */
+ long jitter; /* pps jitter (us) (ro) */
+ int shift; /* interval duration (s) (shift) (ro) */
+ long stabil; /* pps stability (scaled ppm) (ro) */
+ long jitcnt; /* jitter limit exceeded (ro) */
+ long calcnt; /* calibration intervals (ro) */
+ long errcnt; /* calibration errors (ro) */
+ long stbcnt; /* stability limit exceeded (ro) */
+
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+};
+
+asmlinkage int
+sys_old_adjtimex(struct timex32 __user *txc_p)
+{
+ struct timex txc;
+ int ret;
+
+ /* copy relevant bits of struct timex. */
+ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
+ copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
+ offsetof(struct timex32, time)))
+ return -EFAULT;
+
+ ret = do_adjtimex(&txc);
+ if (ret < 0)
+ return ret;
+
+ /* copy back to timex32 */
+ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
+ (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
+ offsetof(struct timex32, tick))) ||
+ (put_tv32(&txc_p->time, &txc.time)))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Get an address range which is currently unmapped. Similar to the
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+static unsigned long
+arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+ unsigned long limit)
+{
+ struct vm_area_struct *vma = find_vma(current->mm, addr);
+
+ while (1) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+ }
+}
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long limit;
+
+ /* "32 bit" actually means 31 bit, since pointers sign extend. */
+ if (current->personality & ADDR_LIMIT_32BIT)
+ limit = 0x80000000;
+ else
+ limit = TASK_SIZE;
+
+ if (len > limit)
+ return -ENOMEM;
+
+ /* First, see if the given suggestion fits.
+
+ The OSF/1 loader (/sbin/loader) relies on us returning an
+ address larger than the requested if one exists, which is
+ a terribly broken way to program.
+
+ That said, I can see the use in being able to suggest not
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+ len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+
+ return addr;
+}
+
+#ifdef CONFIG_OSF4_COMPAT
+
+/* Clear top 32 bits of iov_len in the user's buffer for
+ compatibility with old versions of OSF/1 where iov_len
+ was defined as int. */
+static int
+osf_fix_iov_len(const struct iovec __user *iov, unsigned long count)
+{
+ unsigned long i;
+
+ for (i = 0 ; i < count ; i++) {
+ int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1;
+
+ if (put_user(0, iov_len_high))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+asmlinkage ssize_t
+osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count)
+{
+ if (unlikely(personality(current->personality) == PER_OSF4))
+ if (osf_fix_iov_len(vector, count))
+ return -EFAULT;
+ return sys_readv(fd, vector, count);
+}
+
+asmlinkage ssize_t
+osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count)
+{
+ if (unlikely(personality(current->personality) == PER_OSF4))
+ if (osf_fix_iov_len(vector, count))
+ return -EFAULT;
+ return sys_writev(fd, vector, count);
+}
+
+#endif
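
osf_fix_iov_len above leans on Alpha being little-endian: for a 64-bit iov_len, the int at ((int *)&iov_len) + 1 overlays exactly the high 32 bits, so writing 0 there truncates the field to the 32-bit length an old OSF/1 binary stored. A host-side check of that overlay (a little-endian machine is assumed, as in the kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t iov_len = 0xdeadbeef00000007ULL;  /* stale high word */
	int *high = (int *)&iov_len + 1;           /* high 32 bits, LE only */

	*high = 0;	/* what put_user(0, iov_len_high) does */
	assert(iov_len == 7);
	printf("iov_len fixed up to %llu\n", (unsigned long long)iov_len);
	return 0;
}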
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
new file mode 100644
index 000000000000..582a3519fb28
--- /dev/null
+++ b/arch/alpha/kernel/pci-noop.c
@@ -0,0 +1,214 @@
+/*
+ * linux/arch/alpha/kernel/pci-noop.c
+ *
+ * Stub PCI interfaces for Jensen-specific kernels.
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "proto.h"
+
+
+/*
+ * The PCI controller list.
+ */
+
+struct pci_controller *hose_head, **hose_tail = &hose_head;
+struct pci_controller *pci_isa_hose;
+
+
+struct pci_controller * __init
+alloc_pci_controller(void)
+{
+ struct pci_controller *hose;
+
+ hose = alloc_bootmem(sizeof(*hose));
+
+ *hose_tail = hose;
+ hose_tail = &hose->next;
+
+ return hose;
+}
+
+struct resource * __init
+alloc_resource(void)
+{
+ struct resource *res;
+
+ res = alloc_bootmem(sizeof(*res));
+
+ return res;
+}
+
+asmlinkage long
+sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
+{
+ struct pci_controller *hose;
+
+ /* from hose or from bus.devfn */
+ if (which & IOBASE_FROM_HOSE) {
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == bus)
+ break;
+ if (!hose)
+ return -ENODEV;
+ } else {
+ /* Special hook for ISA access. */
+ if (bus == 0 && dfn == 0)
+ hose = pci_isa_hose;
+ else
+ return -ENODEV;
+ }
+
+ switch (which & ~IOBASE_FROM_HOSE) {
+ case IOBASE_HOSE:
+ return hose->index;
+ case IOBASE_SPARSE_MEM:
+ return hose->sparse_mem_base;
+ case IOBASE_DENSE_MEM:
+ return hose->dense_mem_base;
+ case IOBASE_SPARSE_IO:
+ return hose->sparse_io_base;
+ case IOBASE_DENSE_IO:
+ return hose->dense_io_base;
+ case IOBASE_ROOT_BUS:
+ return hose->bus->number;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+asmlinkage long
+sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len, void *buf)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ else
+ return -ENODEV;
+}
+
+asmlinkage long
+sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len, void *buf)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ else
+ return -ENODEV;
+}
+
+/* Stubs for the routines in pci_iommu.c: */
+
+void *
+pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+{
+ return NULL;
+}
+
+void
+pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+}
+
+dma_addr_t
+pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size,
+ int direction)
+{
+ return (dma_addr_t) 0;
+}
+
+void
+pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+}
+
+int
+pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ return 0;
+}
+
+void
+pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+}
+
+int
+pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+{
+ return 0;
+}
+
+/* Generic DMA mapping functions: */
+
+void *
+dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int gfp)
+{
+ void *ret;
+
+ if (!dev || *dev->dma_mask >= 0xffffffffUL)
+ gfp &= ~GFP_DMA;
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+ if (ret) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; i++ ) {
+ void *va;
+
+ BUG_ON(!sg[i].page);
+ va = page_address(sg[i].page) + sg[i].offset;
+ sg_dma_address(sg + i) = (dma_addr_t)virt_to_bus(va);
+ sg_dma_len(sg + i) = sg[i].length;
+ }
+
+ return nents;
+}
+
+EXPORT_SYMBOL(dma_map_sg);
+
+int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+}
+
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
new file mode 100644
index 000000000000..1f36bbd0ed5d
--- /dev/null
+++ b/arch/alpha/kernel/pci.c
@@ -0,0 +1,561 @@
+/*
+ * linux/arch/alpha/kernel/pci.c
+ *
+ * Extruded from code written by
+ * Dave Rusling (david.rusling@reo.mts.dec.com)
+ * David Mosberger (davidm@cs.arizona.edu)
+ */
+
+/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */
+
+/*
+ * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+ * PCI-PCI bridges cleanup
+ */
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/cache.h>
+#include <linux/slab.h>
+#include <asm/machvec.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * Some string constants used by the various core logics.
+ */
+
+const char *const pci_io_names[] = {
+ "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
+ "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
+};
+
+const char *const pci_mem_names[] = {
+ "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
+ "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
+};
+
+const char pci_hae0_name[] = "HAE0";
+
+/*
+ * Indicate whether we respect the PCI setup left by the console.
+ * Make this long-lived so that we know when shutting down
+ * whether we probed only or not.
+ */
+int pci_probe_only;
+
+/*
+ * The PCI controller list.
+ */
+
+struct pci_controller *hose_head, **hose_tail = &hose_head;
+struct pci_controller *pci_isa_hose;
+
+/*
+ * Quirks.
+ */
+
+static void __init
+quirk_isa_bridge(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_ISA << 8;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);
+
+static void __init
+quirk_cypress(struct pci_dev *dev)
+{
+ /* The Notorious Cy82C693 chip. */
+
+ /* The Cypress IDE controller doesn't support native mode, but it
+ has programmable addresses of IDE command/control registers.
+ This violates PCI specifications, confuses the IDE subsystem and
+ causes resource conflicts between the primary HD_CMD register and
+ the floppy controller. Ugh. Fix that. */
+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
+ dev->resource[0].flags = 0;
+ dev->resource[1].flags = 0;
+ }
+
+ /* The Cypress bridge responds on the PCI bus in the address range
+ 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no
+ way to turn this off. The bridge also supports several extended
+ BIOS ranges (disabled after power-up), and some consoles do turn
+ them on. So if we use a large direct-map window, or a large SG
+ window, we must avoid the entire 0xfff00000-0xffffffff region. */
+ else if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
+ if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
+ __direct_map_size = 0xfff00000UL - __direct_map_base;
+ else {
+ struct pci_controller *hose = dev->sysdata;
+ struct pci_iommu_arena *pci = hose->sg_pci;
+ if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
+ pci->size = 0xfff00000UL - pci->dma_base;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);
+
+/* Called for each device after PCI setup is done. */
+static void __init
+pcibios_fixup_final(struct pci_dev *dev)
+{
+ unsigned int class = dev->class >> 8;
+
+ if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
+ dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
+ isa_bridge = dev;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
+
+/* Just declaring that the power-of-ten prefixes are actually the
+ power-of-two ones doesn't make it true :) */
+#define KB 1024
+#define MB (1024*KB)
+#define GB (1024*MB)
+
+void
+pcibios_align_resource(void *data, struct resource *res,
+ unsigned long size, unsigned long align)
+{
+ struct pci_dev *dev = data;
+ struct pci_controller *hose = dev->sysdata;
+ unsigned long alignto;
+ unsigned long start = res->start;
+
+ if (res->flags & IORESOURCE_IO) {
+ /* Make sure we start at our min on all hoses */
+ if (start - hose->io_space->start < PCIBIOS_MIN_IO)
+ start = PCIBIOS_MIN_IO + hose->io_space->start;
+
+ /*
+ * Put everything into 0x00-0xff region modulo 0x400
+ */
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ }
+ else if (res->flags & IORESOURCE_MEM) {
+ /* Make sure we start at our min on all hoses */
+ if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
+ start = PCIBIOS_MIN_MEM + hose->mem_space->start;
+
+ /*
+ * The following holds at least for the Low Cost
+ * Alpha implementation of the PCI interface:
+ *
+ * In sparse memory address space, the first
+ * octant (16MB) of every 128MB segment is
+ * aliased to the very first 16 MB of the
+ * address space (i.e., it aliases the ISA
+ * memory address space). Thus, we try to
+ * avoid allocating PCI devices in that range.
+ * Can be allocated in 2nd-7th octant only.
+ * Devices that need more than 112MB of
+ * address space must be accessed through
+ * dense memory space only!
+ */
+
+ /* Align to multiple of size of minimum base. */
+ alignto = max(0x1000UL, align);
+ start = ALIGN(start, alignto);
+ if (hose->sparse_mem_base && size <= 7 * 16*MB) {
+ if (((start / (16*MB)) & 0x7) == 0) {
+ start &= ~(128*MB - 1);
+ start += 16*MB;
+ start = ALIGN(start, alignto);
+ }
+ if (start/(128*MB) != (start + size - 1)/(128*MB)) {
+ start &= ~(128*MB - 1);
+ start += (128 + 16)*MB;
+ start = ALIGN(start, alignto);
+ }
+ }
+ }
+
+ res->start = start;
+}
+#undef KB
+#undef MB
+#undef GB
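+
+/*
+ * Worked example of the sparse-memory avoidance above (numbers are
+ * illustrative and assume a hose with sparse_mem_base set): a 1MB
+ * request whose aligned start lands at 128MB sits in the first 16MB
+ * octant of its 128MB segment (128MB / 16MB == 8, and 8 & 7 == 0),
+ * so the code rounds down to the segment start and skips one octant:
+ *
+ *     start == 128MB  ->  start &= ~(128MB - 1)  ->  start += 16MB
+ *     start == 144MB, i.e. octant 1
+ *
+ * leaving octant 0, which aliases ISA memory, untouched.
+ */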
+
+static int __init
+pcibios_init(void)
+{
+ if (alpha_mv.init_pci)
+ alpha_mv.init_pci();
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+char * __init
+pcibios_setup(char *str)
+{
+ return str;
+}
+
+#ifdef ALPHA_RESTORE_SRM_SETUP
+static struct pdev_srm_saved_conf *srm_saved_configs;
+
+void __init
+pdev_save_srm_config(struct pci_dev *dev)
+{
+ struct pdev_srm_saved_conf *tmp;
+ static int printed = 0;
+
+ if (!alpha_using_srm || pci_probe_only)
+ return;
+
+ if (!printed) {
+ printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
+ printed = 1;
+ }
+
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ printk(KERN_ERR "%s: kmalloc() failed!\n", __FUNCTION__);
+ return;
+ }
+ tmp->next = srm_saved_configs;
+ tmp->dev = dev;
+
+ pci_save_state(dev);
+
+ srm_saved_configs = tmp;
+}
+
+void
+pci_restore_srm_config(void)
+{
+ struct pdev_srm_saved_conf *tmp;
+
+ /* No need to restore if probed only. */
+ if (pci_probe_only)
+ return;
+
+ /* Restore SRM config. */
+ for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
+ pci_restore_state(tmp->dev);
+ }
+}
+#endif
+
+void __init
+pcibios_fixup_resource(struct resource *res, struct resource *root)
+{
+ res->start += root->start;
+ res->end += root->start;
+}
+
+void __init
+pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
+{
+ /* Update device resources. */
+ struct pci_controller *hose = (struct pci_controller *)bus->sysdata;
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!dev->resource[i].start)
+ continue;
+ if (dev->resource[i].flags & IORESOURCE_IO)
+ pcibios_fixup_resource(&dev->resource[i],
+ hose->io_space);
+ else if (dev->resource[i].flags & IORESOURCE_MEM)
+ pcibios_fixup_resource(&dev->resource[i],
+ hose->mem_space);
+ }
+}
+
+void __init
+pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* Propagate hose info into the subordinate devices. */
+
+ struct pci_controller *hose = bus->sysdata;
+ struct pci_dev *dev = bus->self;
+
+ if (!dev) {
+ /* Root bus. */
+ u32 pci_mem_end;
+ u32 sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;
+ unsigned long end;
+
+ bus->resource[0] = hose->io_space;
+ bus->resource[1] = hose->mem_space;
+
+ /* Adjust hose mem_space limit to prevent PCI allocations
+ in the iommu windows. */
+ pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
+ end = hose->mem_space->start + pci_mem_end;
+ if (hose->mem_space->end > end)
+ hose->mem_space->end = end;
+ } else if (pci_probe_only &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_read_bridge_bases(bus);
+ pcibios_fixup_device_resources(dev, bus);
+ }
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pdev_save_srm_config(dev);
+ if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ pcibios_fixup_device_resources(dev, bus);
+ }
+}
+
+void __init
+pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/* Most Alphas have straightforward swizzling needs. */
+
+u8 __init
+common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (dev->bus->parent) {
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+
+ /* The slot is the slot of the last bridge. */
+ return PCI_SLOT(dev->devfn);
+}
+
+void __devinit
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res)
+{
+ struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
+ unsigned long offset = 0;
+
+ if (res->flags & IORESOURCE_IO)
+ offset = hose->io_space->start;
+ else if (res->flags & IORESOURCE_MEM)
+ offset = hose->mem_space->start;
+
+ region->start = res->start - offset;
+ region->end = res->end - offset;
+}
+
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+#endif
+
+int
+pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ if (res->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ else if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ if (cmd != oldcmd) {
+ printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
+ pci_name(dev), cmd);
+ /* Enable the appropriate bits in the PCI command register. */
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+/*
+ * If we set up a device for bus mastering, we need to check the latency
+ * timer as certain firmware forgets to set it properly, as seen
+ * on SX164 and LX164 with SRM.
+ */
+void
+pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat >= 16) return;
+ printk("PCI: Setting latency timer of device %s to 64\n",
+ pci_name(dev));
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
+}
+
+static void __init
+pcibios_claim_one_bus(struct pci_bus *b)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(dev, &b->devices, bus_list) {
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->parent || !r->start || !r->flags)
+ continue;
+ pci_claim_resource(dev, i);
+ }
+ }
+
+ list_for_each_entry(child_bus, &b->children, node)
+ pcibios_claim_one_bus(child_bus);
+}
+
+static void __init
+pcibios_claim_console_setup(void)
+{
+ struct pci_bus *b;
+
+ list_for_each_entry(b, &pci_root_buses, node)
+ pcibios_claim_one_bus(b);
+}
+
+void __init
+common_init_pci(void)
+{
+ struct pci_controller *hose;
+ struct pci_bus *bus;
+ int next_busno;
+ int need_domain_info = 0;
+
+ /* Scan all of the recorded PCI controllers. */
+ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
+ bus = pci_scan_bus(next_busno, alpha_mv.pci_ops, hose);
+ hose->bus = bus;
+ hose->need_domain_info = need_domain_info;
+ next_busno = bus->subordinate + 1;
+ /* Don't allow 8-bit bus number overflow inside the hose -
+ reserve some space for bridges. */
+ if (next_busno > 224) {
+ next_busno = 0;
+ need_domain_info = 1;
+ }
+ }
+
+ if (pci_probe_only)
+ pcibios_claim_console_setup();
+
+ pci_assign_unassigned_resources();
+ pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
+}
+
+
+struct pci_controller * __init
+alloc_pci_controller(void)
+{
+ struct pci_controller *hose;
+
+ hose = alloc_bootmem(sizeof(*hose));
+
+ *hose_tail = hose;
+ hose_tail = &hose->next;
+
+ return hose;
+}
+
+struct resource * __init
+alloc_resource(void)
+{
+ struct resource *res;
+
+ res = alloc_bootmem(sizeof(*res));
+
+ return res;
+}
+
+
+/* Provide information on locations of various I/O regions in physical
+ memory. Do this on a per-card basis so that we choose the right hose. */
+
+asmlinkage long
+sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
+{
+ struct pci_controller *hose;
+ struct pci_dev *dev;
+
+ /* from hose or from bus.devfn */
+ if (which & IOBASE_FROM_HOSE) {
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == bus)
+ break;
+ if (!hose)
+ return -ENODEV;
+ } else {
+ /* Special hook for ISA access. */
+ if (bus == 0 && dfn == 0) {
+ hose = pci_isa_hose;
+ } else {
+ dev = pci_find_slot(bus, dfn);
+ if (!dev)
+ return -ENODEV;
+ hose = dev->sysdata;
+ }
+ }
+
+ switch (which & ~IOBASE_FROM_HOSE) {
+ case IOBASE_HOSE:
+ return hose->index;
+ case IOBASE_SPARSE_MEM:
+ return hose->sparse_mem_base;
+ case IOBASE_DENSE_MEM:
+ return hose->dense_mem_base;
+ case IOBASE_SPARSE_IO:
+ return hose->sparse_io_base;
+ case IOBASE_DENSE_IO:
+ return hose->dense_io_base;
+ case IOBASE_ROOT_BUS:
+ return hose->bus->number;
+ }
+
+ return -EOPNOTSUPP;
+}
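+
+/*
+ * Userspace sketch (a hypothetical program, not part of this file):
+ * fetch the dense memory base of the hose behind bus 0, devfn 0,
+ * via the raw syscall; the libc wrapper reports failure through errno:
+ *
+ *     long base = syscall(__NR_pciconfig_iobase, IOBASE_DENSE_MEM, 0, 0);
+ */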
+
+/* Create an __iomem token from a PCI BAR. Copied from lib/iomap.c with
+ no changes, since we don't want the other things in that object file. */
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ /* Not checking IORESOURCE_CACHEABLE because alpha does
+ not distinguish between ioremap and ioremap_nocache. */
+ return ioremap(start, len);
+ }
+ return NULL;
+}
+
+/* Destroy that token. Not copied from lib/iomap.c. */
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+ if (__is_mmio(addr))
+ iounmap(addr);
+}
+
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
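+
+/*
+ * Driver-side sketch of the iomap API (REG_STATUS is a made-up
+ * register offset): map BAR 0, use the ioread/iowrite accessors,
+ * which cope with both IO and MEM BARs, then unmap:
+ *
+ *     void __iomem *regs = pci_iomap(pdev, 0, 0);
+ *     if (!regs)
+ *             return -ENOMEM;
+ *     u32 status = ioread32(regs + REG_STATUS);
+ *     pci_iounmap(pdev, regs);
+ */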
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
new file mode 100644
index 000000000000..f8b74995a002
--- /dev/null
+++ b/arch/alpha/kernel/pci_impl.h
@@ -0,0 +1,209 @@
+/*
+ * linux/arch/alpha/kernel/pci_impl.h
+ *
+ * This file contains declarations and inline functions for interfacing
+ * with the PCI initialization routines.
+ */
+
+struct pci_dev;
+struct pci_controller;
+struct pci_iommu_arena;
+
+/*
+ * We can't just blindly use 64K for machines with EISA busses; they
+ * may also have PCI-PCI bridges present, and then we'd configure the
+ * bridge incorrectly.
+ *
+ * Also, we start at 0x8000 or 0x9000, in hopes of getting all devices'
+ * IO space areas allocated *before* 0xC000; this is because certain
+ * BIOSes (Millennium for one) use PCI Config space "mechanism #2"
+ * accesses to probe the bus. If a device's registers appear at 0xC000,
+ * it may see an INx/OUTx at that address during BIOS emulation of the
+ * VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense.
+ */
+
+#define EISA_DEFAULT_IO_BASE 0x9000 /* start above 8th slot */
+#define DEFAULT_IO_BASE 0x8000 /* start at 8th slot */
+
+/*
+ * We try to make the DEFAULT_MEM_BASE addresses *always* have more than
+ * a single bit set. This is so that devices like the broken Myrinet card
+ * will always have a PCI memory address that will never match an IDSEL
+ * address in PCI Config space, which can cause problems with early rev cards.
+ */
+
+/*
+ * An XL is AVANTI (APECS) family, *but* it has only 27 bits of ISA address
+ * that get passed through the PCI<->ISA bridge chip. Although this causes
+ * us to set the PCI->Mem window bases lower than normal, we still allocate
+ * PCI bus devices' memory addresses *below* the low DMA mapping window,
+ * and hope they fit below 64MB (to avoid conflicts), and so that they can
+ * be accessed via SPARSE space.
+ *
+ * We accept the risk that a broken Myrinet card will be put into a true XL
+ * and thus can more easily run into the problem described above.
+ */
+#define XL_DEFAULT_MEM_BASE ((16+2)*1024*1024) /* 16M to 64M-1 is avail */
+
+/*
+ * APECS and LCA have only 34 bits for physical addresses, thus limiting PCI
+ * bus memory addresses for SPARSE access to less than 128MB.
+ */
+#define APECS_AND_LCA_DEFAULT_MEM_BASE ((16+2)*1024*1024)
+
+/*
+ * Because MCPCIA and T2 core logic support more bits for
+ * physical addresses, they should allow an expanded range of SPARSE
+ * memory addresses. However, we do not use them all, in order to
+ * avoid the HAE manipulation that would be needed.
+ */
+#define MCPCIA_DEFAULT_MEM_BASE ((32+2)*1024*1024)
+#define T2_DEFAULT_MEM_BASE ((16+1)*1024*1024)
+
+/*
+ * Because CIA and PYXIS have more bits for physical addresses,
+ * they support an expanded range of SPARSE memory addresses.
+ */
+#define DEFAULT_MEM_BASE ((128+16)*1024*1024)
+
+/* ??? Experimenting with no HAE for CIA. */
+#define CIA_DEFAULT_MEM_BASE ((32+2)*1024*1024)
+
+#define IRONGATE_DEFAULT_MEM_BASE ((256*8-16)*1024*1024)
+
+#define DEFAULT_AGP_APER_SIZE (64*1024*1024)
+
+/*
+ * A small note about bridges and interrupts. The DECchip 21050 (and
+ * later) adheres to the PCI-PCI bridge specification. This says that
+ * the interrupts on the other side of a bridge are swizzled in the
+ * following manner:
+ *
+ * Dev Interrupt Interrupt
+ * Pin on Pin on
+ * Device Connector
+ *
+ * 4 A A
+ * B B
+ * C C
+ * D D
+ *
+ * 5 A B
+ * B C
+ * C D
+ * D A
+ *
+ * 6 A C
+ * B D
+ * C A
+ * D B
+ *
+ * 7 A D
+ * B A
+ * C B
+ * D C
+ *
+ * Where A = pin 1, B = pin 2, and so on; pin = 0 (the default) means A.
+ * Thus, each swizzle is ((pin-1) + (device#-4)) % 4
+ *
+ * The following code swizzles for exactly one bridge. The routine
+ * common_swizzle below handles multiple bridges. But there are a
+ * couple boards that do strange things, so we define this here.
+ */
+
+static inline u8 bridge_swizzle(u8 pin, u8 slot)
+{
+ return (((pin-1) + slot) % 4) + 1;
+}
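+
+/*
+ * For example, a device in slot 6 raising INTA (pin 1) behind a
+ * single bridge emerges at the connector as INTC:
+ *
+ *     bridge_swizzle(1, 6) == ((0 + 6) % 4) + 1 == 3   (pin C)
+ *
+ * matching the "6  A  C" row of the table above.
+ */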
+
+
+/* The following macro is used to implement the table-based irq mapping
+ function for all single-bus Alphas. */
+
+#define COMMON_TABLE_LOOKUP \
+({ long _ctl_ = -1; \
+ if (slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot) \
+ _ctl_ = irq_tab[slot - min_idsel][pin]; \
+ _ctl_; })
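+
+/*
+ * A board's map_irq routine supplies irq_tab, min_idsel, max_idsel
+ * and irqs_per_slot as locals and simply returns the lookup.  A
+ * hypothetical sketch (the table contents are invented; columns are
+ * INT, INTA..INTD):
+ *
+ *     static int __init
+ *     example_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+ *     {
+ *             static char irq_tab[3][5] __initdata = {
+ *                     { 16, 16, 16, 16, 16 },   // IdSel 5
+ *                     { 17, 17, 18, 19, 20 },   // IdSel 6
+ *                     { 21, 21, 22, 23, 24 },   // IdSel 7
+ *             };
+ *             long min_idsel = 5, max_idsel = 7, irqs_per_slot = 5;
+ *             return COMMON_TABLE_LOOKUP;
+ *     }
+ */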
+
+
+/* A PCI IOMMU allocation arena. There are typically two of these
+ regions per bus. */
+/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently
+ lives directly on the host bridge (no tlb?). We don't support this
+ machine, but if we ever did, we'd need to parameterize all this quite
+ a bit further. Probably with per-bus operation tables. */
+
+struct pci_iommu_arena
+{
+ spinlock_t lock;
+ struct pci_controller *hose;
+#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */
+#define IOMMU_RESERVED_PTE 0xface
+ unsigned long *ptes;
+ dma_addr_t dma_base;
+ unsigned int size;
+ unsigned int next_entry;
+ unsigned int align_entry;
+};
+
+#if defined(CONFIG_ALPHA_SRM) && \
+ (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
+# define NEED_SRM_SAVE_RESTORE
+#else
+# undef NEED_SRM_SAVE_RESTORE
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(NEED_SRM_SAVE_RESTORE)
+# define ALPHA_RESTORE_SRM_SETUP
+#else
+# undef ALPHA_RESTORE_SRM_SETUP
+#endif
+
+#ifdef ALPHA_RESTORE_SRM_SETUP
+/* Store PCI device configuration left by SRM here. */
+struct pdev_srm_saved_conf
+{
+ struct pdev_srm_saved_conf *next;
+ struct pci_dev *dev;
+};
+
+extern void pci_restore_srm_config(void);
+#else
+#define pdev_save_srm_config(dev) do {} while (0)
+#define pci_restore_srm_config() do {} while (0)
+#endif
+
+/* The hose list. */
+extern struct pci_controller *hose_head, **hose_tail;
+extern struct pci_controller *pci_isa_hose;
+
+/* Indicate that we trust the console to configure things properly. */
+extern int pci_probe_only;
+
+extern unsigned long alpha_agpgart_size;
+
+extern void common_init_pci(void);
+extern u8 common_swizzle(struct pci_dev *, u8 *);
+extern struct pci_controller *alloc_pci_controller(void);
+extern struct resource *alloc_resource(void);
+
+extern struct pci_iommu_arena *iommu_arena_new_node(int,
+ struct pci_controller *,
+ dma_addr_t, unsigned long,
+ unsigned long);
+extern struct pci_iommu_arena *iommu_arena_new(struct pci_controller *,
+ dma_addr_t, unsigned long,
+ unsigned long);
+extern const char *const pci_io_names[];
+extern const char *const pci_mem_names[];
+extern const char pci_hae0_name[];
+
+extern unsigned long size_for_memory(unsigned long max);
+
+extern int iommu_reserve(struct pci_iommu_arena *, long, long);
+extern int iommu_release(struct pci_iommu_arena *, long, long);
+extern int iommu_bind(struct pci_iommu_arena *, long, long, unsigned long *);
+extern int iommu_unbind(struct pci_iommu_arena *, long, long);
+
+
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
new file mode 100644
index 000000000000..7cb23f12ecbd
--- /dev/null
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -0,0 +1,971 @@
+/*
+ * linux/arch/alpha/kernel/pci_iommu.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+#define DEBUG_ALLOC 0
+#if DEBUG_ALLOC > 0
+# define DBGA(args...) printk(KERN_DEBUG args)
+#else
+# define DBGA(args...)
+#endif
+#if DEBUG_ALLOC > 1
+# define DBGA2(args...) printk(KERN_DEBUG args)
+#else
+# define DBGA2(args...)
+#endif
+
+#define DEBUG_NODIRECT 0
+#define DEBUG_FORCEDAC 0
+
+#define ISA_DMA_MASK 0x00ffffff
+
+static inline unsigned long
+mk_iommu_pte(unsigned long paddr)
+{
+ return (paddr >> (PAGE_SHIFT-1)) | 1;
+}
+
+static inline long
+calc_npages(long bytes)
+{
+ return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
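+
+/*
+ * Example with alpha's 8KB pages (PAGE_SHIFT == 13): a 10000-byte
+ * buffer starting at physical 0x4000 needs calc_npages(10000) == 2
+ * ptes, and the first is mk_iommu_pte(0x4000) == (0x4000 >> 12) | 1
+ * == 0x5, i.e. the pfn shifted up one bit with the valid bit set.
+ */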
+
+
+/* Return the minimum of MAX and the smallest power of two
+ large enough to cover main memory. */
+
+unsigned long
+size_for_memory(unsigned long max)
+{
+ unsigned long mem = max_low_pfn << PAGE_SHIFT;
+ if (mem < max)
+ max = 1UL << ceil_log2(mem);
+ return max;
+}
+
+struct pci_iommu_arena *
+iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
+ unsigned long window_size, unsigned long align)
+{
+ unsigned long mem_size;
+ struct pci_iommu_arena *arena;
+
+ mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
+
+ /* Note that the TLB lookup logic uses bitwise concatenation,
+ not addition, so the required arena alignment is based on
+ the size of the window. Retain the align parameter so that
+ particular systems can over-align the arena. */
+ if (align < mem_size)
+ align = mem_size;
+
+
+#ifdef CONFIG_DISCONTIGMEM
+
+ if (!NODE_DATA(nid) ||
+ (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
+ sizeof(*arena))))) {
+ printk("%s: couldn't allocate arena from node %d\n"
+ " falling back to system-wide allocation\n",
+ __FUNCTION__, nid);
+ arena = alloc_bootmem(sizeof(*arena));
+ }
+
+ if (!NODE_DATA(nid) ||
+ (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
+ mem_size,
+ align,
+ 0)))) {
+ printk("%s: couldn't allocate arena ptes from node %d\n"
+ " falling back to system-wide allocation\n",
+ __FUNCTION__, nid);
+ arena->ptes = __alloc_bootmem(mem_size, align, 0);
+ }
+
+#else /* CONFIG_DISCONTIGMEM */
+
+ arena = alloc_bootmem(sizeof(*arena));
+ arena->ptes = __alloc_bootmem(mem_size, align, 0);
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+ spin_lock_init(&arena->lock);
+ arena->hose = hose;
+ arena->dma_base = base;
+ arena->size = window_size;
+ arena->next_entry = 0;
+
+ /* Align allocations to a multiple of the page size.  Not needed
+ unless there are chip bugs. */
+ arena->align_entry = 1;
+
+ return arena;
+}
+
+struct pci_iommu_arena *
+iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
+ unsigned long window_size, unsigned long align)
+{
+ return iommu_arena_new_node(0, hose, base, window_size, align);
+}
+
+/* Must be called with the arena lock held */
+static long
+iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+{
+ unsigned long *ptes;
+ long i, p, nent;
+
+ /* Search forward for the first mask-aligned sequence of N free ptes */
+ ptes = arena->ptes;
+ nent = arena->size >> PAGE_SHIFT;
+ p = (arena->next_entry + mask) & ~mask;
+ i = 0;
+ while (i < n && p+i < nent) {
+ if (ptes[p+i])
+ p = (p + i + 1 + mask) & ~mask, i = 0;
+ else
+ i = i + 1;
+ }
+
+ if (i < n) {
+ /* Reached the end. Flush the TLB and restart the
+ search from the beginning. */
+ alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+ p = 0, i = 0;
+ while (i < n && p+i < nent) {
+ if (ptes[p+i])
+ p = (p + i + 1 + mask) & ~mask, i = 0;
+ else
+ i = i + 1;
+ }
+
+ if (i < n)
+ return -1;
+ }
+
+ /* Success. It's the responsibility of the caller to mark them
+ in use before releasing the lock */
+ return p;
+}
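+
+/*
+ * E.g. a request for n == 4 ptes with mask == 7 (the 64KB ISA-bridge
+ * alignment under 8KB pages) starts scanning at the first multiple
+ * of 8 entries at or beyond next_entry and restarts the run whenever
+ * an in-use pte is hit, so any offset returned is 8-entry aligned.
+ */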
+
+static long
+iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+{
+ unsigned long flags;
+ unsigned long *ptes;
+ long i, p, mask;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ /* Search for N empty ptes */
+ ptes = arena->ptes;
+ mask = max(align, arena->align_entry) - 1;
+ p = iommu_arena_find_pages(arena, n, mask);
+ if (p < 0) {
+ spin_unlock_irqrestore(&arena->lock, flags);
+ return -1;
+ }
+
+ /* Success. Mark them all in use, ie not zero and invalid
+ for the iommu tlb that could load them from under us.
+ The chip specific bits will fill this in with something
+ kosher when we return. */
+ for (i = 0; i < n; ++i)
+ ptes[p+i] = IOMMU_INVALID_PTE;
+
+ arena->next_entry = p + n;
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ return p;
+}
+
+static void
+iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
+{
+ unsigned long *p;
+ long i;
+
+ p = arena->ptes + ofs;
+ for (i = 0; i < n; ++i)
+ p[i] = 0;
+}
+
+/* Map a single buffer of the indicated size for PCI DMA in streaming
+ mode. The 32-bit PCI bus mastering address to use is returned.
+ Once the device is given the dma address, the device owns this memory
+ until either pci_unmap_single or pci_dma_sync_single is performed. */
+
+static dma_addr_t
+pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
+ int dac_allowed)
+{
+ struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
+ dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
+ struct pci_iommu_arena *arena;
+ long npages, dma_ofs, i;
+ unsigned long paddr;
+ dma_addr_t ret;
+ unsigned int align = 0;
+
+ paddr = __pa(cpu_addr);
+
+#if !DEBUG_NODIRECT
+ /* First check to see if we can use the direct map window. */
+ if (paddr + size + __direct_map_base - 1 <= max_dma
+ && paddr + size <= __direct_map_size) {
+ ret = paddr + __direct_map_base;
+
+ DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
+ cpu_addr, size, ret, __builtin_return_address(0));
+
+ return ret;
+ }
+#endif
+
+ /* Next, use DAC if selected earlier. */
+ if (dac_allowed) {
+ ret = paddr + alpha_mv.pci_dac_offset;
+
+ DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
+ cpu_addr, size, ret, __builtin_return_address(0));
+
+ return ret;
+ }
+
+ /* If the machine doesn't define a pci_tbi routine, we have to
+ assume it doesn't support sg mapping.  Since the direct map was
+ already tried above, there is nothing left to try and this must
+ be considered an error. */
+ if (! alpha_mv.mv_pci_tbi) {
+ static int been_here = 0; /* Only print the message once. */
+ if (!been_here) {
+ printk(KERN_WARNING "pci_map_single: no HW sg\n");
+ been_here = 1;
+ }
+ return 0;
+ }
+
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size - 1 > max_dma)
+ arena = hose->sg_isa;
+
+ npages = calc_npages((paddr & ~PAGE_MASK) + size);
+
+ /* Force allocation to 64KB boundary for ISA bridges. */
+ if (pdev && pdev == isa_bridge)
+ align = 8;
+ dma_ofs = iommu_arena_alloc(arena, npages, align);
+ if (dma_ofs < 0) {
+ printk(KERN_WARNING "pci_map_single failed: "
+ "could not allocate dma page tables\n");
+ return 0;
+ }
+
+ paddr &= PAGE_MASK;
+ for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
+ arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
+
+ ret = arena->dma_base + dma_ofs * PAGE_SIZE;
+ ret += (unsigned long)cpu_addr & ~PAGE_MASK;
+
+ DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
+ cpu_addr, size, npages, ret, __builtin_return_address(0));
+
+ return ret;
+}
+
+dma_addr_t
+pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
+{
+ int dac_allowed;
+
+ if (dir == PCI_DMA_NONE)
+ BUG();
+
+ dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+ return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
+}
+
+dma_addr_t
+pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
+ size_t size, int dir)
+{
+ int dac_allowed;
+
+ if (dir == PCI_DMA_NONE)
+ BUG();
+
+ dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+ return pci_map_single_1(pdev, (char *)page_address(page) + offset,
+ size, dac_allowed);
+}
+
+/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
+ SIZE must match what was provided for in a previous pci_map_single
+ call. All other usages are undefined. After this call, reads by
+ the cpu to the buffer are guaranteed to see whatever the device
+ wrote there. */
+
+void
+pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+ unsigned long flags;
+ struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
+ struct pci_iommu_arena *arena;
+ long dma_ofs, npages;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ if (dma_addr >= __direct_map_base
+ && dma_addr < __direct_map_base + __direct_map_size) {
+ /* Nothing to do. */
+
+ DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+
+ return;
+ }
+
+ if (dma_addr > 0xffffffff) {
+ DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+ return;
+ }
+
+ arena = hose->sg_pci;
+ if (!arena || dma_addr < arena->dma_base)
+ arena = hose->sg_isa;
+
+ dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
+ if (dma_ofs * PAGE_SIZE >= arena->size) {
+ printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
+ " base %lx size %x\n", dma_addr, arena->dma_base,
+ arena->size);
+ return;
+ BUG();
+ }
+
+ npages = calc_npages((dma_addr & ~PAGE_MASK) + size);
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ iommu_arena_free(arena, dma_ofs, npages);
+
+ /* If we're freeing ptes above the `next_entry' pointer (they
+ may have snuck back into the TLB since the last wrap flush),
+ we need to flush the TLB before reallocating the latter. */
+ if (dma_ofs >= arena->next_entry)
+ alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
+
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
+ dma_addr, size, npages, __builtin_return_address(0));
+}
+
+void
+pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ pci_unmap_single(pdev, dma_addr, size, direction);
+}
+
+/* Allocate and map kernel buffer using consistent mode DMA for PCI
+ device. Returns non-NULL cpu-view pointer to the buffer if
+ successful and sets *DMA_ADDRP to the pci side dma address as well,
+ else DMA_ADDRP is undefined. */
+
+void *
+pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+{
+ void *cpu_addr;
+ long order = get_order(size);
+ int gfp = GFP_ATOMIC;
+
+try_again:
+ cpu_addr = (void *)__get_free_pages(gfp, order);
+ if (! cpu_addr) {
+ printk(KERN_INFO "pci_alloc_consistent: "
+ "get_free_pages failed from %p\n",
+ __builtin_return_address(0));
+ /* ??? Really atomic allocation? Otherwise we could play
+ with vmalloc and sg if we can't find contiguous memory. */
+ return NULL;
+ }
+ memset(cpu_addr, 0, size);
+
+ *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
+ if (*dma_addrp == 0) {
+ free_pages((unsigned long)cpu_addr, order);
+ if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
+ return NULL;
+ /* The address doesn't fit required mask and we
+ do not have iommu. Try again with GFP_DMA. */
+ gfp |= GFP_DMA;
+ goto try_again;
+ }
+
+ DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
+ size, cpu_addr, *dma_addrp, __builtin_return_address(0));
+
+ return cpu_addr;
+}
+
+/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
+ be values that were returned from pci_alloc_consistent. SIZE must
+ be the same as what was passed into pci_alloc_consistent.
+ References to the memory and mappings associated with CPU_ADDR or
+ DMA_ADDR past this call are illegal. */
+
+void
+pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)cpu_addr, get_order(size));
+
+ DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+}
+
+
+/* Classify the elements of the scatterlist. Write dma_address
+ of each element with:
+ 0 : Followers all physically adjacent.
+ 1 : Followers all virtually adjacent.
+ -1 : Not leader, physically adjacent to previous.
+ -2 : Not leader, virtually adjacent to previous.
+ Write dma_length of each leader with the combined lengths of
+ the mergeable followers. */
+
+#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
+
+static void
+sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
+{
+ unsigned long next_paddr;
+ struct scatterlist *leader;
+ long leader_flag, leader_length;
+
+ leader = sg;
+ leader_flag = 0;
+ leader_length = leader->length;
+ next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
+
+ for (++sg; sg < end; ++sg) {
+ unsigned long addr, len;
+ addr = SG_ENT_PHYS_ADDRESS(sg);
+ len = sg->length;
+
+ if (next_paddr == addr) {
+ sg->dma_address = -1;
+ leader_length += len;
+ } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
+ sg->dma_address = -2;
+ leader_flag = 1;
+ leader_length += len;
+ } else {
+ leader->dma_address = leader_flag;
+ leader->dma_length = leader_length;
+ leader = sg;
+ leader_flag = 0;
+ leader_length = len;
+ }
+
+ next_paddr = addr + len;
+ }
+
+ leader->dma_address = leader_flag;
+ leader->dma_length = leader_length;
+}
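+
+/*
+ * Illustrative run (addresses invented): given three entries where
+ * the second is physically adjacent to the first and the third is
+ * neither adjacent nor page-aligned, sg_classify leaves
+ *
+ *     sg[0].dma_address == 0    (leader; followers physically adjacent)
+ *     sg[0].dma_length  == sg[0].length + sg[1].length
+ *     sg[1].dma_address == -1   (follower, merged into its leader)
+ *     sg[2].dma_address == 0    (a new leader of length sg[2].length)
+ *
+ * ready for sg_fill below to allocate each leader in one shot.
+ */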
+
+/* Given a scatterlist leader, choose an allocation method and fill
+ in the blanks. */
+
+static int
+sg_fill(struct scatterlist *leader, struct scatterlist *end,
+ struct scatterlist *out, struct pci_iommu_arena *arena,
+ dma_addr_t max_dma, int dac_allowed)
+{
+ unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
+ long size = leader->dma_length;
+ struct scatterlist *sg;
+ unsigned long *ptes;
+ long npages, dma_ofs, i;
+
+#if !DEBUG_NODIRECT
+ /* If everything is physically contiguous, and the addresses
+ fall into the direct-map window, use it. */
+ if (leader->dma_address == 0
+ && paddr + size + __direct_map_base - 1 <= max_dma
+ && paddr + size <= __direct_map_size) {
+ out->dma_address = paddr + __direct_map_base;
+ out->dma_length = size;
+
+ DBGA(" sg_fill: [%p,%lx] -> direct %lx\n",
+ __va(paddr), size, out->dma_address);
+
+ return 0;
+ }
+#endif
+
+ /* If physically contiguous and DAC is available, use it. */
+ if (leader->dma_address == 0 && dac_allowed) {
+ out->dma_address = paddr + alpha_mv.pci_dac_offset;
+ out->dma_length = size;
+
+ DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n",
+ __va(paddr), size, out->dma_address);
+
+ return 0;
+ }
+
+ /* Otherwise, we'll use the iommu to make the pages virtually
+ contiguous. */
+
+ paddr &= ~PAGE_MASK;
+ npages = calc_npages(paddr + size);
+ dma_ofs = iommu_arena_alloc(arena, npages, 0);
+ if (dma_ofs < 0) {
+ /* If we attempted a direct map above but failed, die. */
+ if (leader->dma_address == 0)
+ return -1;
+
+ /* Otherwise, break up the remaining virtually contiguous
+ hunks into individual direct maps and retry. */
+ sg_classify(leader, end, 0);
+ return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
+ }
+
+ out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
+ out->dma_length = size;
+
+ DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n",
+ __va(paddr), size, out->dma_address, npages);
+
+ /* All virtually contiguous. We need to find the length of each
+ physically contiguous subsegment to fill in the ptes. */
+ ptes = &arena->ptes[dma_ofs];
+ sg = leader;
+ do {
+#if DEBUG_ALLOC > 0
+ struct scatterlist *last_sg = sg;
+#endif
+
+ size = sg->length;
+ paddr = SG_ENT_PHYS_ADDRESS(sg);
+
+ while (sg+1 < end && (int) sg[1].dma_address == -1) {
+ size += sg[1].length;
+ sg++;
+ }
+
+ npages = calc_npages((paddr & ~PAGE_MASK) + size);
+
+ paddr &= PAGE_MASK;
+ for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
+ *ptes++ = mk_iommu_pte(paddr);
+
+#if DEBUG_ALLOC > 0
+ DBGA(" (%ld) [%p,%x] np %ld\n",
+ last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
+ last_sg->length, npages);
+ while (++last_sg <= sg) {
+ DBGA(" (%ld) [%p,%x] cont\n",
+ last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
+ last_sg->length);
+ }
+#endif
+ } while (++sg < end && (int) sg->dma_address < 0);
+
+ return 1;
+}
+
+int
+pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ struct scatterlist *start, *end, *out;
+ struct pci_controller *hose;
+ struct pci_iommu_arena *arena;
+ dma_addr_t max_dma;
+ int dac_allowed;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+
+ /* Fast path single entry scatterlists. */
+ if (nents == 1) {
+ sg->dma_length = sg->length;
+ sg->dma_address
+ = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
+ sg->length, dac_allowed);
+ return sg->dma_address != 0;
+ }
+
+ start = sg;
+ end = sg + nents;
+
+ /* First, prepare information about the entries. */
+ sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);
+
+ /* Second, figure out where we're going to map things. */
+ if (alpha_mv.mv_pci_tbi) {
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size - 1 > max_dma)
+ arena = hose->sg_isa;
+ } else {
+ max_dma = -1;
+ arena = NULL;
+ hose = NULL;
+ }
+
+ /* Third, iterate over the scatterlist leaders and allocate
+ dma space as needed. */
+ for (out = sg; sg < end; ++sg) {
+ if ((int) sg->dma_address < 0)
+ continue;
+ if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
+ goto error;
+ out++;
+ }
+
+ /* Mark the end of the list for pci_unmap_sg. */
+ if (out < end)
+ out->dma_length = 0;
+
+ if (out - start == 0)
+ printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+ DBGA("pci_map_sg: %ld entries\n", out - start);
+
+ return out - start;
+
+ error:
+ printk(KERN_WARNING "pci_map_sg failed: "
+ "could not allocate dma page tables\n");
+
+ /* Some allocation failed while mapping the scatterlist
+ entries. Unmap them now. */
+ if (out > start)
+ pci_unmap_sg(pdev, start, out - start, direction);
+ return 0;
+}
+
+/* Unmap a set of streaming mode DMA translations. Again, cpu read
+ rules concerning calls here are the same as for pci_unmap_single()
+ above. */
+
+void
+pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ unsigned long flags;
+ struct pci_controller *hose;
+ struct pci_iommu_arena *arena;
+ struct scatterlist *end;
+ dma_addr_t max_dma;
+ dma_addr_t fbeg, fend;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ if (! alpha_mv.mv_pci_tbi)
+ return;
+
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size - 1 > max_dma)
+ arena = hose->sg_isa;
+
+ fbeg = -1, fend = 0;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ for (end = sg + nents; sg < end; ++sg) {
+ dma64_addr_t addr;
+ size_t size;
+ long npages, ofs;
+ dma_addr_t tend;
+
+ addr = sg->dma_address;
+ size = sg->dma_length;
+ if (!size)
+ break;
+
+ if (addr > 0xffffffff) {
+ /* It's a DAC address -- nothing to do. */
+ DBGA(" (%ld) DAC [%lx,%lx]\n",
+ sg - end + nents, addr, size);
+ continue;
+ }
+
+ if (addr >= __direct_map_base
+ && addr < __direct_map_base + __direct_map_size) {
+ /* Nothing to do. */
+ DBGA(" (%ld) direct [%lx,%lx]\n",
+ sg - end + nents, addr, size);
+ continue;
+ }
+
+ DBGA(" (%ld) sg [%lx,%lx]\n",
+ sg - end + nents, addr, size);
+
+ npages = calc_npages((addr & ~PAGE_MASK) + size);
+ ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
+ iommu_arena_free(arena, ofs, npages);
+
+ tend = addr + size - 1;
+ if (fbeg > addr) fbeg = addr;
+ if (fend < tend) fend = tend;
+ }
+
+ /* If we're freeing ptes above the `next_entry' pointer (they
+ may have snuck back into the TLB since the last wrap flush),
+ we need to flush the TLB before reallocating the latter. */
+ if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
+ alpha_mv.mv_pci_tbi(hose, fbeg, fend);
+
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
+}
+
+
+/* Return whether the given PCI device DMA address mask can be
+ supported properly. */
+
+int
+pci_dma_supported(struct pci_dev *pdev, u64 mask)
+{
+ struct pci_controller *hose;
+ struct pci_iommu_arena *arena;
+
+ /* If there exists a direct map, and the mask fits either
+ the entire direct mapped space or the total system memory as
+ shifted by the map base */
+ if (__direct_map_size != 0
+ && (__direct_map_base + __direct_map_size - 1 <= mask ||
+ __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
+ return 1;
+
+ /* Check that we have a scatter-gather arena that fits. */
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ arena = hose->sg_isa;
+ if (arena && arena->dma_base + arena->size - 1 <= mask)
+ return 1;
+ arena = hose->sg_pci;
+ if (arena && arena->dma_base + arena->size - 1 <= mask)
+ return 1;
+
+ /* As last resort try ZONE_DMA. */
+ if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
+ return 1;
+
+ return 0;
+}
+
+
+/*
+ * AGP GART extensions to the IOMMU
+ */
+int
+iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
+{
+ unsigned long flags;
+ unsigned long *ptes;
+ long i, p;
+
+ if (!arena) return -EINVAL;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ /* Search for N empty ptes. */
+ ptes = arena->ptes;
+ p = iommu_arena_find_pages(arena, pg_count, align_mask);
+ if (p < 0) {
+ spin_unlock_irqrestore(&arena->lock, flags);
+ return -1;
+ }
+
+ /* Success. Mark them all reserved (ie not zero and invalid)
+ for the iommu tlb that could load them from under us.
+ They will be filled in with valid bits by _bind() */
+ for (i = 0; i < pg_count; ++i)
+ ptes[p+i] = IOMMU_RESERVED_PTE;
+
+ arena->next_entry = p + pg_count;
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ return p;
+}
+
+int
+iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
+{
+ unsigned long *ptes;
+ long i;
+
+ if (!arena) return -EINVAL;
+
+ ptes = arena->ptes;
+
+ /* Make sure they're all reserved first... */
+ for (i = pg_start; i < pg_start + pg_count; i++)
+ if (ptes[i] != IOMMU_RESERVED_PTE)
+ return -EBUSY;
+
+ iommu_arena_free(arena, pg_start, pg_count);
+ return 0;
+}
+
+int
+iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
+ unsigned long *physaddrs)
+{
+ unsigned long flags;
+ unsigned long *ptes;
+ long i, j;
+
+ if (!arena) return -EINVAL;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ ptes = arena->ptes;
+
+ for (j = pg_start; j < pg_start + pg_count; j++) {
+ if (ptes[j] != IOMMU_RESERVED_PTE) {
+ spin_unlock_irqrestore(&arena->lock, flags);
+ return -EBUSY;
+ }
+ }
+
+ for (i = 0, j = pg_start; i < pg_count; i++, j++)
+ ptes[j] = mk_iommu_pte(physaddrs[i]);
+
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ return 0;
+}
+
+int
+iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
+{
+ unsigned long *p;
+ long i;
+
+ if (!arena) return -EINVAL;
+
+ p = arena->ptes + pg_start;
+ for (i = 0; i < pg_count; i++)
+ p[i] = IOMMU_RESERVED_PTE;
+
+ return 0;
+}
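+
+/*
+ * Sketch of the intended GART lifecycle (arena, pg_count, align_mask
+ * and physaddrs stand in for the AGP driver's real arguments):
+ *
+ *     long ofs = iommu_reserve(arena, pg_count, align_mask);
+ *     if (ofs >= 0) {
+ *             iommu_bind(arena, ofs, pg_count, physaddrs);
+ *             ... DMA through the bound window ...
+ *             iommu_unbind(arena, ofs, pg_count);
+ *             iommu_release(arena, ofs, pg_count);
+ *     }
+ *
+ * unbind resets the ptes to IOMMU_RESERVED_PTE, which is exactly the
+ * state iommu_release insists on before freeing them.
+ */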
+
+/* True if the machine supports DAC addressing, and DEV can
+ make use of it given MASK. */
+
+int
+pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
+{
+ dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+ int ok = 1;
+
+ /* If this is not set, the machine doesn't support DAC at all. */
+ if (dac_offset == 0)
+ ok = 0;
+
+ /* The device has to be able to address our DAC bit. */
+ if ((dac_offset & dev->dma_mask) != dac_offset)
+ ok = 0;
+
+ /* If both conditions above are met, we are fine. */
+ DBGA("pci_dac_dma_supported %s from %p\n",
+ ok ? "yes" : "no", __builtin_return_address(0));
+
+ return ok;
+}
+
+dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
+ unsigned long offset, int direction)
+{
+ return (alpha_mv.pci_dac_offset
+ + __pa(page_address(page))
+ + (dma64_addr_t) offset);
+}
+
+struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
+ return virt_to_page(__va(paddr));
+}
+
+unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return (dma_addr & ~PAGE_MASK);
+}
+
+
+/* Helper for generic DMA-mapping functions. */
+
+struct pci_dev *
+alpha_gendev_to_pci(struct device *dev)
+{
+ if (dev && dev->bus == &pci_bus_type)
+ return to_pci_dev(dev);
+
+ /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
+ BUG() otherwise. */
+ BUG_ON(!isa_bridge);
+
+ /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
+ bridge is bus master then). */
+ if (!dev || !dev->dma_mask || !*dev->dma_mask)
+ return isa_bridge;
+
+ /* For EISA bus masters, return isa_bridge (it might have smaller
+ dma_mask due to wiring limitations). */
+ if (*dev->dma_mask >= isa_bridge->dma_mask)
+ return isa_bridge;
+
+ /* This assumes ISA bus master with dma_mask 0xffffff. */
+ return NULL;
+}
+
+int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask ||
+ !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
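+
+/*
+ * Typical driver-side use (a sketch; the surrounding probe code is
+ * hypothetical): a device wired for 32-bit DMA sets its mask before
+ * making any mapping calls, letting the checks above vet the mask
+ * against the direct-map window and the sg arenas:
+ *
+ *     if (dma_set_mask(&pdev->dev, 0xffffffffUL))
+ *             return -EIO;
+ */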
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
new file mode 100644
index 000000000000..4933f3ce5833
--- /dev/null
+++ b/arch/alpha/kernel/process.c
@@ -0,0 +1,528 @@
+/*
+ * linux/arch/alpha/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/elfcore.h>
+#include <linux/reboot.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+
+#include <asm/reg.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/hwrpb.h>
+#include <asm/fpu.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+void default_idle(void)
+{
+ barrier();
+}
+
+void
+cpu_idle(void)
+{
+ while (1) {
+ void (*idle)(void) = default_idle;
+ /* FIXME -- EV6 and LCA45 know how to power down
+ the CPU. */
+
+ while (!need_resched())
+ idle();
+ schedule();
+ }
+}
+
+
+struct halt_info {
+ int mode;
+ char *restart_cmd;
+};
+
+static void
+common_shutdown_1(void *generic_ptr)
+{
+ struct halt_info *how = (struct halt_info *)generic_ptr;
+ struct percpu_struct *cpup;
+ unsigned long *pflags, flags;
+ int cpuid = smp_processor_id();
+
+ /* No point in taking interrupts anymore. */
+ local_irq_disable();
+
+ cpup = (struct percpu_struct *)
+ ((unsigned long)hwrpb + hwrpb->processor_offset
+ + hwrpb->processor_size * cpuid);
+ pflags = &cpup->flags;
+ flags = *pflags;
+
+ /* Clear reason to "default"; clear "bootstrap in progress". */
+ flags &= ~0x00ff0001UL;
+
+#ifdef CONFIG_SMP
+ /* Secondaries halt here. */
+ if (cpuid != boot_cpuid) {
+ flags |= 0x00040000UL; /* "remain halted" */
+ *pflags = flags;
+ clear_bit(cpuid, &cpu_present_mask);
+ halt();
+ }
+#endif
+
+ if (how->mode == LINUX_REBOOT_CMD_RESTART) {
+ if (!how->restart_cmd) {
+ flags |= 0x00020000UL; /* "cold bootstrap" */
+ } else {
+ /* For SRM, we could probably set environment
+ variables to get this to work. We'd have to
+ delay this until after srm_paging_stop unless
+ we ever got srm_fixup working.
+
+ At the moment, SRM will use the last boot device,
+ but the file and flags will be the defaults, when
+ doing a "warm" bootstrap. */
+ flags |= 0x00030000UL; /* "warm bootstrap" */
+ }
+ } else {
+ flags |= 0x00040000UL; /* "remain halted" */
+ }
+ *pflags = flags;
+
+#ifdef CONFIG_SMP
+ /* Wait for the secondaries to halt. */
+ cpu_clear(boot_cpuid, cpu_possible_map);
+ while (cpus_weight(cpu_possible_map))
+ barrier();
+#endif
+
+ /* If booted from SRM, reset some of the original environment. */
+ if (alpha_using_srm) {
+#ifdef CONFIG_DUMMY_CONSOLE
+ /* This has the effect of resetting the VGA video origin. */
+ take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
+#endif
+ pci_restore_srm_config();
+ set_hae(srm_hae);
+ }
+
+ if (alpha_mv.kill_arch)
+ alpha_mv.kill_arch(how->mode);
+
+ if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
+ /* Unfortunately, since MILO doesn't currently understand
+ the hwrpb bits above, we can't reliably halt the
+ processor and keep it halted. So just loop. */
+ return;
+ }
+
+ if (alpha_using_srm)
+ srm_paging_stop();
+
+ halt();
+}
+
+static void
+common_shutdown(int mode, char *restart_cmd)
+{
+ struct halt_info args;
+ args.mode = mode;
+ args.restart_cmd = restart_cmd;
+ on_each_cpu(common_shutdown_1, &args, 1, 0);
+}
+
+void
+machine_restart(char *restart_cmd)
+{
+ common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void
+machine_halt(void)
+{
+ common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void
+machine_power_off(void)
+{
+ common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+/* Used by sysrq-p, among others.  I don't believe r9-r15 are ever
+ saved in the context in which this is used. */
+
+void
+show_regs(struct pt_regs *regs)
+{
+ dik_show_regs(regs, NULL);
+}
+
+/*
+ * Re-start a thread when doing execve()
+ */
+void
+start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
+{
+ set_fs(USER_DS);
+ regs->pc = pc;
+ regs->ps = 8;
+ wrusp(sp);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void
+exit_thread(void)
+{
+}
+
+void
+flush_thread(void)
+{
+ /* Arrange for each exec'ed process to start off with a clean slate
+ with respect to the FPU. This is all exceptions disabled. */
+ current_thread_info()->ieee_state = 0;
+ wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
+
+ /* Clean slate for TLS. */
+ current_thread_info()->pcb.unique = 0;
+}
+
+void
+release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * "alpha_clone()".. By the time we get here, the
+ * non-volatile registers have also been saved on the
+ * stack. We do some ugly pointer stuff here.. (see
+ * also copy_thread)
+ *
+ * Notice that "fork()" is implemented in terms of clone,
+ * with parameters (SIGCHLD, 0).
+ */
+int
+alpha_clone(unsigned long clone_flags, unsigned long usp,
+ int __user *parent_tid, int __user *child_tid,
+ unsigned long tls_value, struct pt_regs *regs)
+{
+ if (!usp)
+ usp = rdusp();
+
+ return do_fork(clone_flags, usp, regs, 0, parent_tid, child_tid);
+}
+
+int
+alpha_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(),
+ regs, 0, NULL, NULL);
+}
+
+/*
+ * Copy an alpha thread..
+ *
+ * Note the "stack_offset" stuff: when returning to kernel mode, we need
+ * to have some extra stack-space for the kernel stack that still exists
+ * after the "ret_from_fork". When returning to user mode, we only want
+ * the space needed by the syscall stack frame (ie "struct pt_regs").
+ * Use the passed "regs" pointer to determine how much space we need
+ * for a kernel fork().
+ */
+
+int
+copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ extern void ret_from_fork(void);
+
+ struct thread_info *childti = p->thread_info;
+ struct pt_regs * childregs;
+ struct switch_stack * childstack, *stack;
+ unsigned long stack_offset, settls;
+
+ stack_offset = PAGE_SIZE - sizeof(struct pt_regs);
+ if (!(regs->ps & 8))
+ stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
+ childregs = (struct pt_regs *)
+ (stack_offset + PAGE_SIZE + (long) childti);
+
+ *childregs = *regs;
+ settls = regs->r20;
+ childregs->r0 = 0;
+ childregs->r19 = 0;
+ childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
+ regs->r20 = 0;
+ stack = ((struct switch_stack *) regs) - 1;
+ childstack = ((struct switch_stack *) childregs) - 1;
+ *childstack = *stack;
+ childstack->r26 = (unsigned long) ret_from_fork;
+ childti->pcb.usp = usp;
+ childti->pcb.ksp = (unsigned long) childstack;
+ childti->pcb.flags = 1; /* set FEN, clear everything else */
+
+ /* Set a new TLS for the child thread? Peek back into the
+ syscall arguments that we saved on syscall entry. Oops,
+ except we'd have clobbered it with the parent/child set
+ of r20. Read the saved copy. */
+ /* Note: if CLONE_SETTLS is not set, then we must inherit the
+ value from the parent, which will have been set by the block
+ copy in dup_task_struct. This is non-intuitive, but is
+ required for proper operation in the case of a threaded
+ application calling fork. */
+ if (clone_flags & CLONE_SETTLS)
+ childti->pcb.unique = settls;
+
+ return 0;
+}
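+
+/*
+ * Resulting layout for the common user-mode case (8KB pages): the
+ * child's thread_info sits at the bottom of its two-page kernel
+ * stack and childregs lands at the very top,
+ *
+ *     childregs == (char *)childti + 2*PAGE_SIZE - sizeof(struct pt_regs)
+ *
+ * which is the same spot dump_elf_task reconstructs below.
+ */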
+
+/*
+ * Fill in the user structure for an ECOFF core dump.
+ */
+void
+dump_thread(struct pt_regs * pt, struct user * dump)
+{
+ /* switch stack follows right below pt_regs: */
+ struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
+
+ dump->magic = CMAGIC;
+ dump->start_code = current->mm->start_code;
+ dump->start_data = current->mm->start_data;
+ dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((current->mm->end_code - dump->start_code)
+ >> PAGE_SHIFT);
+ dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data)
+ >> PAGE_SHIFT);
+ dump->u_ssize = (current->mm->start_stack - dump->start_stack
+ + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+ /*
+ * We store the registers in an order/format that is
+ * compatible with DEC Unix/OSF/1 as this makes life easier
+ * for gdb.
+ */
+ dump->regs[EF_V0] = pt->r0;
+ dump->regs[EF_T0] = pt->r1;
+ dump->regs[EF_T1] = pt->r2;
+ dump->regs[EF_T2] = pt->r3;
+ dump->regs[EF_T3] = pt->r4;
+ dump->regs[EF_T4] = pt->r5;
+ dump->regs[EF_T5] = pt->r6;
+ dump->regs[EF_T6] = pt->r7;
+ dump->regs[EF_T7] = pt->r8;
+ dump->regs[EF_S0] = sw->r9;
+ dump->regs[EF_S1] = sw->r10;
+ dump->regs[EF_S2] = sw->r11;
+ dump->regs[EF_S3] = sw->r12;
+ dump->regs[EF_S4] = sw->r13;
+ dump->regs[EF_S5] = sw->r14;
+ dump->regs[EF_S6] = sw->r15;
+ dump->regs[EF_A3] = pt->r19;
+ dump->regs[EF_A4] = pt->r20;
+ dump->regs[EF_A5] = pt->r21;
+ dump->regs[EF_T8] = pt->r22;
+ dump->regs[EF_T9] = pt->r23;
+ dump->regs[EF_T10] = pt->r24;
+ dump->regs[EF_T11] = pt->r25;
+ dump->regs[EF_RA] = pt->r26;
+ dump->regs[EF_T12] = pt->r27;
+ dump->regs[EF_AT] = pt->r28;
+ dump->regs[EF_SP] = rdusp();
+ dump->regs[EF_PS] = pt->ps;
+ dump->regs[EF_PC] = pt->pc;
+ dump->regs[EF_GP] = pt->gp;
+ dump->regs[EF_A0] = pt->r16;
+ dump->regs[EF_A1] = pt->r17;
+ dump->regs[EF_A2] = pt->r18;
+ memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8);
+}
+
+/*
+ * Fill in the user structure for a ELF core dump.
+ */
+void
+dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
+{
+ /* switch stack follows right below pt_regs: */
+ struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
+
+ dest[ 0] = pt->r0;
+ dest[ 1] = pt->r1;
+ dest[ 2] = pt->r2;
+ dest[ 3] = pt->r3;
+ dest[ 4] = pt->r4;
+ dest[ 5] = pt->r5;
+ dest[ 6] = pt->r6;
+ dest[ 7] = pt->r7;
+ dest[ 8] = pt->r8;
+ dest[ 9] = sw->r9;
+ dest[10] = sw->r10;
+ dest[11] = sw->r11;
+ dest[12] = sw->r12;
+ dest[13] = sw->r13;
+ dest[14] = sw->r14;
+ dest[15] = sw->r15;
+ dest[16] = pt->r16;
+ dest[17] = pt->r17;
+ dest[18] = pt->r18;
+ dest[19] = pt->r19;
+ dest[20] = pt->r20;
+ dest[21] = pt->r21;
+ dest[22] = pt->r22;
+ dest[23] = pt->r23;
+ dest[24] = pt->r24;
+ dest[25] = pt->r25;
+ dest[26] = pt->r26;
+ dest[27] = pt->r27;
+ dest[28] = pt->r28;
+ dest[29] = pt->gp;
+ dest[30] = rdusp();
+ dest[31] = pt->pc;
+
+	/* Once upon a time this was the PS value, which is redundant
+	   since that is always 8 for usermode. The slot has been usurped
+	   for the more useful value of the thread's UNIQUE field. */
+ dest[32] = ti->pcb.unique;
+}
+
+int
+dump_elf_task(elf_greg_t *dest, struct task_struct *task)
+{
+ struct thread_info *ti;
+ struct pt_regs *pt;
+
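+	/* The saved pt_regs sit at the very top of the two-page kernel
+	   stack; see the stack layout comment in ptrace.c. */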
+ ti = task->thread_info;
+ pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
+
+ dump_elf_thread(dest, pt, ti);
+
+ return 1;
+}
+
+int
+dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
+{
+ struct thread_info *ti;
+ struct pt_regs *pt;
+ struct switch_stack *sw;
+
+ ti = task->thread_info;
+ pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
+ sw = (struct switch_stack *)pt - 1;
+
+ memcpy(dest, sw->fp, 32 * 8);
+
+ return 1;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int
+do_sys_execve(char __user *ufilename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname(ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ return error;
+}
+
+/*
+ * Return saved PC of a blocked thread. This assumes the frame
+ * pointer is the 6th saved long on the kernel stack and that the
+ * saved return address is the first long in the frame. This all
+ * holds provided the thread blocked through a call to schedule() ($15
+ * is the frame pointer in schedule() and $15 is saved at offset 48 by
+ * entry.S:do_switch_stack).
+ *
+ * Under heavy swap load I've seen this lose in an ugly way. So do
+ * some extra sanity checking on the ranges we expect these pointers
+ * to be in so that we can fail gracefully. This is just for ps after
+ * all. -- r~
+ */
+
+unsigned long
+thread_saved_pc(task_t *t)
+{
+ unsigned long base = (unsigned long)t->thread_info;
+ unsigned long fp, sp = t->thread_info->pcb.ksp;
+
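+	/* The bounds below reflect the two-page (2 x 8KB) kernel stack. */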
+ if (sp > base && sp+6*8 < base + 16*1024) {
+ fp = ((unsigned long*)sp)[6];
+ if (fp > sp && fp < base + 16*1024)
+ return *(unsigned long *)fp;
+ }
+
+ return 0;
+}
+
+unsigned long
+get_wchan(struct task_struct *p)
+{
+ unsigned long schedule_frame;
+ unsigned long pc;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ /*
+ * This one depends on the frame size of schedule(). Do a
+ * "disass schedule" in gdb to find the frame size. Also, the
+ * code assumes that sleep_on() follows immediately after
+ * interruptible_sleep_on() and that add_timer() follows
+ * immediately after interruptible_sleep_on(). Ugly, isn't it?
+ * Maybe adding a wchan field to task_struct would be better,
+ * after all...
+ */
+
+ pc = thread_saved_pc(p);
+ if (in_sched_functions(pc)) {
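+		/* Index 6 is the $15 frame pointer saved at byte offset
+		   48; see the comment above thread_saved_pc. */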
+ schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6];
+ return ((unsigned long *)schedule_frame)[12];
+ }
+ return pc;
+}
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
new file mode 100644
index 000000000000..e1560fb15610
--- /dev/null
+++ b/arch/alpha/kernel/proto.h
@@ -0,0 +1,210 @@
+#include <linux/config.h>
+#include <linux/interrupt.h>
+
+
+/* Prototypes of functions used across modules here in this directory. */
+
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
+#define vip volatile int *
+#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
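+
+/* Shorthand pointer casts for raw MMIO accesses in the core logic
+   code, e.g. *(vuip)addr to access a 32-bit register. */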
+
+struct pt_regs;
+struct task_struct;
+struct pci_dev;
+struct pci_controller;
+
+/* core_apecs.c */
+extern struct pci_ops apecs_pci_ops;
+extern void apecs_init_arch(void);
+extern void apecs_pci_clr_err(void);
+extern void apecs_machine_check(u64, u64, struct pt_regs *);
+extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_cia.c */
+extern struct pci_ops cia_pci_ops;
+extern void cia_init_pci(void);
+extern void cia_init_arch(void);
+extern void pyxis_init_arch(void);
+extern void cia_kill_arch(int);
+extern void cia_machine_check(u64, u64, struct pt_regs *);
+extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_irongate.c */
+extern struct pci_ops irongate_pci_ops;
+extern int irongate_pci_clr_err(void);
+extern void irongate_init_arch(void);
+extern void irongate_machine_check(u64, u64, struct pt_regs *);
+#define irongate_pci_tbi ((void *)0)
+
+/* core_lca.c */
+extern struct pci_ops lca_pci_ops;
+extern void lca_init_arch(void);
+extern void lca_machine_check(u64, u64, struct pt_regs *);
+extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_marvel.c */
+extern struct pci_ops marvel_pci_ops;
+extern void marvel_init_arch(void);
+extern void marvel_kill_arch(int);
+extern void marvel_machine_check(u64, u64, struct pt_regs *);
+extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+extern int marvel_pa_to_nid(unsigned long);
+extern int marvel_cpuid_to_nid(int);
+extern unsigned long marvel_node_mem_start(int);
+extern unsigned long marvel_node_mem_size(int);
+extern struct _alpha_agp_info *marvel_agp_info(void);
+struct io7 *marvel_find_io7(int pe);
+struct io7 *marvel_next_io7(struct io7 *prev);
+void io7_clear_errors(struct io7 *io7);
+
+/* core_mcpcia.c */
+extern struct pci_ops mcpcia_pci_ops;
+extern void mcpcia_init_arch(void);
+extern void mcpcia_init_hoses(void);
+extern void mcpcia_machine_check(u64, u64, struct pt_regs *);
+extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_polaris.c */
+extern struct pci_ops polaris_pci_ops;
+extern int polaris_read_config_dword(struct pci_dev *, int, u32 *);
+extern int polaris_write_config_dword(struct pci_dev *, int, u32);
+extern void polaris_init_arch(void);
+extern void polaris_machine_check(u64, u64, struct pt_regs *);
+#define polaris_pci_tbi ((void *)0)
+
+/* core_t2.c */
+extern struct pci_ops t2_pci_ops;
+extern void t2_init_arch(void);
+extern void t2_kill_arch(int);
+extern void t2_machine_check(u64, u64, struct pt_regs *);
+extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_titan.c */
+extern struct pci_ops titan_pci_ops;
+extern void titan_init_arch(void);
+extern void titan_kill_arch(int);
+extern void titan_machine_check(u64, u64, struct pt_regs *);
+extern void titan_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+extern struct _alpha_agp_info *titan_agp_info(void);
+
+/* core_tsunami.c */
+extern struct pci_ops tsunami_pci_ops;
+extern void tsunami_init_arch(void);
+extern void tsunami_kill_arch(int);
+extern void tsunami_machine_check(u64, u64, struct pt_regs *);
+extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_wildfire.c */
+extern struct pci_ops wildfire_pci_ops;
+extern void wildfire_init_arch(void);
+extern void wildfire_kill_arch(int);
+extern void wildfire_machine_check(u64, u64, struct pt_regs *);
+extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+extern int wildfire_pa_to_nid(unsigned long);
+extern int wildfire_cpuid_to_nid(int);
+extern unsigned long wildfire_node_mem_start(int);
+extern unsigned long wildfire_node_mem_size(int);
+
+/* setup.c */
+extern unsigned long srm_hae;
+extern int boot_cpuid;
+#ifdef CONFIG_VERBOSE_MCHECK
+extern unsigned long alpha_verbose_mcheck;
+#endif
+
+/* srmcons.c */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
+extern void register_srm_console(void);
+extern void unregister_srm_console(void);
+#else
+#define register_srm_console()
+#define unregister_srm_console()
+#endif
+
+/* smp.c */
+extern void setup_smp(void);
+extern void handle_ipi(struct pt_regs *);
+extern void smp_percpu_timer_interrupt(struct pt_regs *);
+
+/* bios32.c */
+/* extern void reset_for_srm(void); */
+
+/* time.c */
+extern irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs);
+extern void common_init_rtc(void);
+extern unsigned long est_cycle_freq;
+
+/* smc37c93x.c */
+extern void SMC93x_Init(void);
+
+/* smc37c669.c */
+extern void SMC669_Init(int);
+
+/* es1888.c */
+extern void es1888_init(void);
+
+/* ns87312.c */
+extern void ns87312_enable_ide(long ide_base);
+
+/* ../lib/fpreg.c */
+extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
+extern unsigned long alpha_read_fp_reg (unsigned long reg);
+
+/* head.S */
+extern void wrmces(unsigned long mces);
+extern void cserve_ena(unsigned long);
+extern void cserve_dis(unsigned long);
+extern void __smp_callin(unsigned long);
+
+/* entry.S */
+extern void entArith(void);
+extern void entIF(void);
+extern void entInt(void);
+extern void entMM(void);
+extern void entSys(void);
+extern void entUna(void);
+extern void entDbg(void);
+
+/* ptrace.c */
+extern int ptrace_set_bpt (struct task_struct *child);
+extern int ptrace_cancel_bpt (struct task_struct *child);
+
+/* traps.c */
+extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15);
+extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);
+
+/* sys_titan.c */
+extern void titan_dispatch_irqs(u64, struct pt_regs *);
+
+/* ../mm/init.c */
+extern void switch_to_system_map(void);
+extern void srm_paging_stop(void);
+
+/* ../mm/remap.c */
+extern int __alpha_remap_area_pages(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
+/* irq.c */
+
+#ifdef CONFIG_SMP
+#define mcheck_expected(cpu) (cpu_data[cpu].mcheck_expected)
+#define mcheck_taken(cpu) (cpu_data[cpu].mcheck_taken)
+#define mcheck_extra(cpu) (cpu_data[cpu].mcheck_extra)
+#else
+extern struct mcheck_info
+{
+ unsigned char expected __attribute__((aligned(8)));
+ unsigned char taken;
+ unsigned char extra;
+} __mcheck_info;
+
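+/* The (void)(cpu) comma expression evaluates and discards the
+   argument, so the UP macros below take the same arguments as the
+   SMP versions above. */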
+#define mcheck_expected(cpu) (*((void)(cpu), &__mcheck_info.expected))
+#define mcheck_taken(cpu) (*((void)(cpu), &__mcheck_info.taken))
+#define mcheck_extra(cpu) (*((void)(cpu), &__mcheck_info.extra))
+#endif
+
+extern void process_mcheck_info(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs, const char *machine,
+ int expected);
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
new file mode 100644
index 000000000000..d00583161574
--- /dev/null
+++ b/arch/alpha/kernel/ptrace.c
@@ -0,0 +1,415 @@
+/* ptrace.c */
+/* By Ross Biro 1/23/92 */
+/* edited by Linus Torvalds */
+/* mangled further by Bob Manson (manson@santafe.edu) */
+/* more mutilation by David Mosberger (davidm@azstarnet.com) */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/fpu.h>
+
+#include "proto.h"
+
+#define DEBUG DBG_MEM
+#undef DEBUG
+
+#ifdef DEBUG
+enum {
+ DBG_MEM = (1<<0),
+ DBG_BPT = (1<<1),
+ DBG_MEM_ALL = (1<<2)
+};
+#define DBG(fac,args) {if ((fac) & DEBUG) printk args;}
+#else
+#define DBG(fac,args)
+#endif
+
+#define BREAKINST 0x00000080 /* call_pal bpt */
+
+/*
+ * This does not yet catch signals sent when the child dies;
+ * that would have to be handled in exit.c or in signal.c.
+ */
+
+/*
+ * Processes always block with the following stack-layout:
+ *
+ * +================================+ <---- task + 2*PAGE_SIZE
+ * | PALcode saved frame (ps, pc, | ^
+ * | gp, a0, a1, a2) | |
+ * +================================+ | struct pt_regs
+ * | | |
+ * | frame generated by SAVE_ALL | |
+ * | | v
+ * +================================+
+ * | | ^
+ * | frame saved by do_switch_stack | | struct switch_stack
+ * | | v
+ * +================================+
+ */
+
+/*
+ * The following table maps a register index into the stack offset at
+ * which the register is saved. Register indices are 0-31 for integer
+ * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and
+ * zero have no stack-slot and need to be treated specially (see
+ * get_reg/put_reg below).
+ */
+enum {
+ REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
+};
+
+static int regoff[] = {
+ PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3),
+ PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7),
+ PT_REG( r8), SW_REG( r9), SW_REG( r10), SW_REG( r11),
+ SW_REG( r12), SW_REG( r13), SW_REG( r14), SW_REG( r15),
+ PT_REG( r16), PT_REG( r17), PT_REG( r18), PT_REG( r19),
+ PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23),
+ PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27),
+ PT_REG( r28), PT_REG( gp), -1, -1,
+ SW_REG(fp[ 0]), SW_REG(fp[ 1]), SW_REG(fp[ 2]), SW_REG(fp[ 3]),
+ SW_REG(fp[ 4]), SW_REG(fp[ 5]), SW_REG(fp[ 6]), SW_REG(fp[ 7]),
+ SW_REG(fp[ 8]), SW_REG(fp[ 9]), SW_REG(fp[10]), SW_REG(fp[11]),
+ SW_REG(fp[12]), SW_REG(fp[13]), SW_REG(fp[14]), SW_REG(fp[15]),
+ SW_REG(fp[16]), SW_REG(fp[17]), SW_REG(fp[18]), SW_REG(fp[19]),
+ SW_REG(fp[20]), SW_REG(fp[21]), SW_REG(fp[22]), SW_REG(fp[23]),
+ SW_REG(fp[24]), SW_REG(fp[25]), SW_REG(fp[26]), SW_REG(fp[27]),
+ SW_REG(fp[28]), SW_REG(fp[29]), SW_REG(fp[30]), SW_REG(fp[31]),
+ PT_REG( pc)
+};
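+
+/* PT_REG() and SW_REG() expand to byte offsets from the thread_info
+   base, per the stack layout pictured above; get_reg_addr() below
+   simply adds regoff[regno] to the task's thread_info pointer. */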
+
+static unsigned long zero;
+
+/*
+ * Get address of register REGNO in task TASK.
+ */
+static unsigned long *
+get_reg_addr(struct task_struct * task, unsigned long regno)
+{
+ unsigned long *addr;
+
+ if (regno == 30) {
+ addr = &task->thread_info->pcb.usp;
+ } else if (regno == 65) {
+ addr = &task->thread_info->pcb.unique;
+ } else if (regno == 31 || regno > 65) {
+ zero = 0;
+ addr = &zero;
+ } else {
+ addr = (void *)task->thread_info + regoff[regno];
+ }
+ return addr;
+}
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+static unsigned long
+get_reg(struct task_struct * task, unsigned long regno)
+{
+ /* Special hack for fpcr -- combine hardware and software bits. */
+ if (regno == 63) {
+ unsigned long fpcr = *get_reg_addr(task, regno);
+ unsigned long swcr
+ = task->thread_info->ieee_state & IEEE_SW_MASK;
+ swcr = swcr_update_status(swcr, fpcr);
+ return fpcr | swcr;
+ }
+ return *get_reg_addr(task, regno);
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static int
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
+{
+ if (regno == 63) {
+ task->thread_info->ieee_state
+ = ((task->thread_info->ieee_state & ~IEEE_SW_MASK)
+ | (data & IEEE_SW_MASK));
+ data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
+ }
+ *get_reg_addr(task, regno) = data;
+ return 0;
+}
+
+static inline int
+read_int(struct task_struct *task, unsigned long addr, int * data)
+{
+ int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+ return (copied == sizeof(int)) ? 0 : -EIO;
+}
+
+static inline int
+write_int(struct task_struct *task, unsigned long addr, int data)
+{
+ int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+ return (copied == sizeof(int)) ? 0 : -EIO;
+}
+
+/*
+ * Set breakpoint.
+ */
+int
+ptrace_set_bpt(struct task_struct * child)
+{
+ int displ, i, res, reg_b, nsaved = 0;
+ unsigned int insn, op_code;
+ unsigned long pc;
+
+ pc = get_reg(child, REG_PC);
+ res = read_int(child, pc, (int *) &insn);
+ if (res < 0)
+ return res;
+
+ op_code = insn >> 26;
+ if (op_code >= 0x30) {
+ /*
+ * It's a branch: instead of trying to figure out
+ * whether the branch will be taken or not, we'll put
+	 * a breakpoint at both possible locations. This is simpler,
+ * more reliable, and probably not a whole lot slower
+ * than the alternative approach of emulating the
+ * branch (emulation can be tricky for fp branches).
+ */
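+		/* The branch format keeps a 21-bit signed word
+		   displacement in bits 0..20: shifting left 11 moves the
+		   sign to bit 31, and the arithmetic shift right 9
+		   sign-extends it while scaling by 4 bytes per
+		   instruction. */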
+ displ = ((s32)(insn << 11)) >> 9;
+ child->thread_info->bpt_addr[nsaved++] = pc + 4;
+ if (displ) /* guard against unoptimized code */
+ child->thread_info->bpt_addr[nsaved++]
+ = pc + 4 + displ;
+ DBG(DBG_BPT, ("execing branch\n"));
+ } else if (op_code == 0x1a) {
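+		/* Opcode 0x1a is the memory-format jump group (JMP, JSR,
+		   RET, JSR_COROUTINE); the target address is in Rb. */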
+ reg_b = (insn >> 16) & 0x1f;
+ child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b);
+ DBG(DBG_BPT, ("execing jump\n"));
+ } else {
+ child->thread_info->bpt_addr[nsaved++] = pc + 4;
+ DBG(DBG_BPT, ("execing normal insn\n"));
+ }
+
+ /* install breakpoints: */
+ for (i = 0; i < nsaved; ++i) {
+ res = read_int(child, child->thread_info->bpt_addr[i],
+ (int *) &insn);
+ if (res < 0)
+ return res;
+ child->thread_info->bpt_insn[i] = insn;
+ DBG(DBG_BPT, (" -> next_pc=%lx\n",
+ child->thread_info->bpt_addr[i]));
+ res = write_int(child, child->thread_info->bpt_addr[i],
+ BREAKINST);
+ if (res < 0)
+ return res;
+ }
+ child->thread_info->bpt_nsaved = nsaved;
+ return 0;
+}
+
+/*
+ * Ensure no single-step breakpoint is pending. Returns non-zero
+ * value if child was being single-stepped.
+ */
+int
+ptrace_cancel_bpt(struct task_struct * child)
+{
+ int i, nsaved = child->thread_info->bpt_nsaved;
+
+ child->thread_info->bpt_nsaved = 0;
+
+ if (nsaved > 2) {
+ printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
+ nsaved = 2;
+ }
+
+ for (i = 0; i < nsaved; ++i) {
+ write_int(child, child->thread_info->bpt_addr[i],
+ child->thread_info->bpt_insn[i]);
+ }
+ return (nsaved != 0);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ ptrace_cancel_bpt(child);
+}
+
+asmlinkage long
+do_sys_ptrace(long request, long pid, long addr, long data,
+ struct pt_regs *regs)
+{
+ struct task_struct *child;
+ unsigned long tmp;
+ size_t copied;
+ long ret;
+
+ lock_kernel();
+ DBG(DBG_MEM, ("request=%ld pid=%ld addr=0x%lx data=0x%lx\n",
+ request, pid, addr, data));
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out_notsk;
+ ret = security_ptrace(current->parent, current);
+ if (ret)
+ goto out_notsk;
+ /* set the ptrace bit in the process ptrace flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out_notsk;
+ }
+ if (pid == 1) /* you may not mess with init */
+ goto out_notsk;
+
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out_notsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out;
+
+ switch (request) {
+ /* When I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ break;
+
+ regs->r0 = 0; /* special return: no errors */
+ ret = tmp;
+ break;
+
+ /* Read register number ADDR. */
+ case PTRACE_PEEKUSR:
+ regs->r0 = 0; /* special return: no errors */
+ ret = get_reg(child, addr);
+ DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
+ break;
+
+ /* When I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ tmp = data;
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
+ ret = (copied == sizeof(tmp)) ? 0 : -EIO;
+ break;
+
+ case PTRACE_POKEUSR: /* write the specified register */
+ DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
+ ret = put_reg(child, addr, data);
+ break;
+
+ case PTRACE_SYSCALL:
+ /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: /* restart after signal. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt(child);
+ wake_up_process(child);
+ ret = 0;
+ break;
+
+ /*
+	 * Make the child exit. The best we can do is send it a SIGKILL.
+	 * Perhaps it should instead be recorded in the child's status
+	 * that it wants to exit.
+ */
+ case PTRACE_KILL:
+ ret = 0;
+ if (child->exit_state == EXIT_ZOMBIE)
+ break;
+ child->exit_code = SIGKILL;
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt(child);
+ wake_up_process(child);
+ goto out;
+
+ case PTRACE_SINGLESTEP: /* execute single instruction. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ /* Mark single stepping. */
+ child->thread_info->bpt_nsaved = -1;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ wake_up_process(child);
+ /* give it a chance to run. */
+ ret = 0;
+ goto out;
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ goto out;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ goto out;
+ }
+ out:
+ put_task_struct(child);
+ out_notsk:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void
+syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ /* The 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+
+ /*
+ * This isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
new file mode 100644
index 000000000000..8c8aaa205eae
--- /dev/null
+++ b/arch/alpha/kernel/semaphore.c
@@ -0,0 +1,224 @@
+/*
+ * Alpha semaphore implementation.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999, 2000 Richard Henderson
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+/*
+ * This is basically the PPC semaphore scheme ported to use
+ * the Alpha ll/sc sequences, so see the PPC code for
+ * credits.
+ */
+
+/*
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
+ *
+ * old_count = sem->count;
+ * tmp = MAX(old_count, 0) + incr;
+ * sem->count = tmp;
+ * return old_count;
+ */
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+ long old_count, tmp = 0;
+
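+	/* cmovgt copies old_count into tmp only when old_count > 0;
+	   otherwise tmp keeps its initial 0. That yields
+	   MAX(old_count, 0) before the addl adds incr. */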
+ __asm__ __volatile__(
+ "1: ldl_l %0,%2\n"
+ " cmovgt %0,%0,%1\n"
+ " addl %1,%3,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+ : "Ir" (incr), "1" (tmp), "m" (sem->count));
+
+ return old_count;
+}
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ */
+
+void __sched
+__down_failed(struct semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down failed(%p)\n",
+ tsk->comm, tsk->pid, sem);
+#endif
+
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ wmb();
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ /*
+ * Try to get the semaphore. If the count is > 0, then we've
+ * got the semaphore; we decrement count and exit the loop.
+ * If the count is 0 or negative, we set it to -1, indicating
+ * that we are asleep, and then sleep.
+ */
+ while (__sem_update_count(sem, -1) <= 0) {
+ schedule();
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ }
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ /*
+ * If there are any more sleepers, wake one of them up so
+ * that it can either get the semaphore, or set count to -1
+ * indicating that there are still processes sleeping.
+ */
+ wake_up(&sem->wait);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down acquired(%p)\n",
+ tsk->comm, tsk->pid, sem);
+#endif
+}
+
+int __sched
+__down_failed_interruptible(struct semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ long ret = 0;
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down failed(%p)\n",
+ tsk->comm, tsk->pid, sem);
+#endif
+
+ tsk->state = TASK_INTERRUPTIBLE;
+ wmb();
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ while (__sem_update_count(sem, -1) <= 0) {
+ if (signal_pending(current)) {
+ /*
+ * A signal is pending - give up trying.
+ * Set sem->count to 0 if it is negative,
+ * since we are no longer sleeping.
+ */
+ __sem_update_count(sem, 0);
+ ret = -EINTR;
+ break;
+ }
+ schedule();
+ set_task_state(tsk, TASK_INTERRUPTIBLE);
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+ wake_up(&sem->wait);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down %s(%p)\n",
+ current->comm, current->pid,
+ (ret < 0 ? "interrupted" : "acquired"), sem);
+#endif
+ return ret;
+}
+
+void
+__up_wakeup(struct semaphore *sem)
+{
+ /*
+ * Note that we incremented count in up() before we came here,
+ * but that was ineffective since the result was <= 0, and
+ * any negative value of count is equivalent to 0.
+ * This ends up setting count to 1, unless count is now > 0
+ * (i.e. because some other cpu has called up() in the meantime),
+ * in which case we just increment count.
+ */
+ __sem_update_count(sem, 1);
+ wake_up(&sem->wait);
+}
+
+void __sched
+down(struct semaphore *sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ __down(sem);
+}
+
+int __sched
+down_interruptible(struct semaphore *sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ return __down_interruptible(sem);
+}
+
+int
+down_trylock(struct semaphore *sem)
+{
+ int ret;
+
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ ret = __down_trylock(sem);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down_trylock %s from %p\n",
+ current->comm, current->pid,
+ ret ? "failed" : "acquired",
+ __builtin_return_address(0));
+#endif
+
+ return ret;
+}
+
+void
+up(struct semaphore *sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): up(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ __up(sem);
+}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
new file mode 100644
index 000000000000..b4e5f8ff2b25
--- /dev/null
+++ b/arch/alpha/kernel/setup.c
@@ -0,0 +1,1486 @@
+/*
+ * linux/arch/alpha/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
+
+/*
+ * Bootup setup stuff.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/config.h> /* CONFIG_ALPHA_LCA etc */
+#include <linux/mc146818rtc.h>
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/bootmem.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/eisa.h>
+#ifdef CONFIG_MAGIC_SYSRQ
+#include <linux/sysrq.h>
+#include <linux/reboot.h>
+#endif
+#include <linux/notifier.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+
+extern struct notifier_block *panic_notifier_list;
+static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
+static struct notifier_block alpha_panic_block = {
+ alpha_panic_event,
+ NULL,
+ INT_MAX /* try to do it first */
+};
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/hwrpb.h>
+#include <asm/dma.h>
+#include <asm/mmu_context.h>
+#include <asm/console.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+struct hwrpb_struct *hwrpb;
+unsigned long srm_hae;
+
+int alpha_l1i_cacheshape;
+int alpha_l1d_cacheshape;
+int alpha_l2_cacheshape;
+int alpha_l3_cacheshape;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+/* 0=minimum, 1=verbose, 2=all */
+/* These can be overridden via the command line, e.g. "verbose_mcheck=2". */
+unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
+#endif
+
+/* Which processor we booted from. */
+int boot_cpuid;
+
+/*
+ * Using SRM callbacks for initial console output. This works from
+ * setup_arch() time through the end of time_init(), as those places
+ * are under our (Alpha) control.
+ *
+ * "srmcons" specified in the boot command arguments allows us to
+ * see kernel messages during the period of time before the true
+ * console device is "registered" during console_init().
+ * As of this version (2.5.59), console_init() will call
+ * disable_early_printk() as the last action before initializing
+ * the console drivers. That's the last possible time srmcons can be
+ * unregistered without interfering with console behavior.
+ *
+ * By default, OFF; set it with a boot command arg of "srmcons" or
+ * "console=srm". The meaning of these two args is:
+ * "srmcons" - early callback prints
+ * "console=srm" - full callback based console, including early prints
+ */
+int srmcons_output = 0;
+
+/* Enforce a memory size limit; useful for testing. By default, none. */
+unsigned long mem_size_limit = 0;
+
+/* Set AGP GART window size (0 means disabled). */
+unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
+
+#ifdef CONFIG_ALPHA_GENERIC
+struct alpha_machine_vector alpha_mv;
+int alpha_using_srm;
+#endif
+
+#define N(a) (sizeof(a)/sizeof(a[0]))
+
+static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
+ unsigned long);
+static struct alpha_machine_vector *get_sysvec_byname(const char *);
+static void get_sysnames(unsigned long, unsigned long, unsigned long,
+ char **, char **);
+static void determine_cpu_caches (unsigned int);
+
+static char command_line[COMMAND_LINE_SIZE];
+
+/*
+ * The format of "screen_info" is strange, and due to early
+ * i386-setup code. This is just enough to make the console
+ * code think we're on a VGA color display.
+ */
+
+struct screen_info screen_info = {
+ .orig_x = 0,
+ .orig_y = 25,
+ .orig_video_cols = 80,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16
+};
+
+/*
+ * The direct map I/O window, if any. This should be the same
+ * for all busses, since it's used by virt_to_bus.
+ */
+
+unsigned long __direct_map_base;
+unsigned long __direct_map_size;
+
+/*
+ * Declare all of the machine vectors.
+ */
+
+/* GCC 2.7.2 (on alpha at least) is lame. It does not support either
+ __attribute__((weak)) or #pragma weak. Bypass it and talk directly
+ to the assembler. */
+
+#define WEAK(X) \
+ extern struct alpha_machine_vector X; \
+ asm(".weak "#X)
+
+WEAK(alcor_mv);
+WEAK(alphabook1_mv);
+WEAK(avanti_mv);
+WEAK(cabriolet_mv);
+WEAK(clipper_mv);
+WEAK(dp264_mv);
+WEAK(eb164_mv);
+WEAK(eb64p_mv);
+WEAK(eb66_mv);
+WEAK(eb66p_mv);
+WEAK(eiger_mv);
+WEAK(jensen_mv);
+WEAK(lx164_mv);
+WEAK(lynx_mv);
+WEAK(marvel_ev7_mv);
+WEAK(miata_mv);
+WEAK(mikasa_mv);
+WEAK(mikasa_primo_mv);
+WEAK(monet_mv);
+WEAK(nautilus_mv);
+WEAK(noname_mv);
+WEAK(noritake_mv);
+WEAK(noritake_primo_mv);
+WEAK(p2k_mv);
+WEAK(pc164_mv);
+WEAK(privateer_mv);
+WEAK(rawhide_mv);
+WEAK(ruffian_mv);
+WEAK(rx164_mv);
+WEAK(sable_mv);
+WEAK(sable_gamma_mv);
+WEAK(shark_mv);
+WEAK(sx164_mv);
+WEAK(takara_mv);
+WEAK(titan_mv);
+WEAK(webbrick_mv);
+WEAK(wildfire_mv);
+WEAK(xl_mv);
+WEAK(xlt_mv);
+
+#undef WEAK
+
+/*
+ * I/O resources inherited from PeeCees. Except for perhaps the
+ * turbochannel alphas, everyone has these on some sort of SuperIO chip.
+ *
+ * ??? If this becomes less standard, move the struct out into the
+ * machine vector.
+ */
+
+static void __init
+reserve_std_resources(void)
+{
+ static struct resource standard_io_resources[] = {
+ { .name = "rtc", .start = -1, .end = -1 },
+ { .name = "dma1", .start = 0x00, .end = 0x1f },
+ { .name = "pic1", .start = 0x20, .end = 0x3f },
+ { .name = "timer", .start = 0x40, .end = 0x5f },
+ { .name = "keyboard", .start = 0x60, .end = 0x6f },
+ { .name = "dma page reg", .start = 0x80, .end = 0x8f },
+ { .name = "pic2", .start = 0xa0, .end = 0xbf },
+ { .name = "dma2", .start = 0xc0, .end = 0xdf },
+ };
+
+ struct resource *io = &ioport_resource;
+ size_t i;
+
+ if (hose_head) {
+ struct pci_controller *hose;
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == 0) {
+ io = hose->io_space;
+ break;
+ }
+ }
+
+ /* Fix up for the Jensen's queer RTC placement. */
+ standard_io_resources[0].start = RTC_PORT(0);
+ standard_io_resources[0].end = RTC_PORT(0) + 0x10;
+
+ for (i = 0; i < N(standard_io_resources); ++i)
+ request_resource(io, standard_io_resources+i);
+}
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+#define PFN_MAX PFN_DOWN(0x80000000)
+#define for_each_mem_cluster(memdesc, cluster, i) \
+ for ((cluster) = (memdesc)->cluster, (i) = 0; \
+ (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+
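+/* Parse a size argument such as "mem=128M". With the Alpha's 8KB
+   pages, "128M" yields (128 << 20) >> PAGE_SHIFT = 16384 as the
+   returned PFN limit. */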
+static unsigned long __init
+get_mem_size_limit(char *s)
+{
+ unsigned long end = 0;
+ char *from = s;
+
+ end = simple_strtoul(from, &from, 0);
+ if ( *from == 'K' || *from == 'k' ) {
+ end = end << 10;
+ from++;
+ } else if ( *from == 'M' || *from == 'm' ) {
+ end = end << 20;
+ from++;
+ } else if ( *from == 'G' || *from == 'g' ) {
+ end = end << 30;
+ from++;
+ }
+ return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void * __init
+move_initrd(unsigned long mem_limit)
+{
+ void *start;
+ unsigned long size;
+
+ size = initrd_end - initrd_start;
+ start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
+ if (!start || __pa(start) + size > mem_limit) {
+ initrd_start = initrd_end = 0;
+ return NULL;
+ }
+ memmove(start, (void *)initrd_start, size);
+ initrd_start = (unsigned long)start;
+ initrd_end = initrd_start + size;
+ printk("initrd moved to %p\n", start);
+ return start;
+}
+#endif
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init
+setup_memory(void *kernel_end)
+{
+ struct memclust_struct * cluster;
+ struct memdesc_struct * memdesc;
+ unsigned long start_kernel_pfn, end_kernel_pfn;
+ unsigned long bootmap_size, bootmap_pages, bootmap_start;
+ unsigned long start, end;
+ unsigned long i;
+
+ /* Find free clusters, and init and free the bootmem accordingly. */
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
+
+ for_each_mem_cluster(memdesc, cluster, i) {
+ printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
+ i, cluster->usage, cluster->start_pfn,
+ cluster->start_pfn + cluster->numpages);
+
+ /* Bit 0 is console/PALcode reserved. Bit 1 is
+ non-volatile memory -- we might want to mark
+ this for later. */
+ if (cluster->usage & 3)
+ continue;
+
+ end = cluster->start_pfn + cluster->numpages;
+ if (end > max_low_pfn)
+ max_low_pfn = end;
+ }
+
+ /*
+ * Except for the NUMA systems (wildfire, marvel) all of the
+ * Alpha systems we run on support 32GB of memory or less.
+ * Since the NUMA systems introduce large holes in memory addressing,
+ * we can get into a situation where there is not enough contiguous
+ * memory for the memory map.
+ *
+	 * Limit memory to the first 32GB, restricting the NUMA systems to
+	 * memory on their first node (wildfire) or first two (marvel), so
+	 * that the memory map can always be produced. In order to access
+ * all of the memory on the NUMA systems, build with discontiguous
+ * memory support.
+ *
+ * If the user specified a memory limit, let that memory limit stand.
+ */
+ if (!mem_size_limit)
+ mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
+
+ if (mem_size_limit && max_low_pfn >= mem_size_limit)
+ {
+ printk("setup: forcing memory size to %ldK (from %ldK).\n",
+ mem_size_limit << (PAGE_SHIFT - 10),
+ max_low_pfn << (PAGE_SHIFT - 10));
+ max_low_pfn = mem_size_limit;
+ }
+
+ /* Find the bounds of kernel memory. */
+ start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
+ end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
+ bootmap_start = -1;
+
+ try_again:
+ if (max_low_pfn <= end_kernel_pfn)
+ panic("not enough memory to boot");
+
+ /* We need to know how many physically contiguous pages
+ we'll need for the bootmap. */
+ bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
+
+ /* Now find a good region where to allocate the bootmap. */
+ for_each_mem_cluster(memdesc, cluster, i) {
+ if (cluster->usage & 3)
+ continue;
+
+ start = cluster->start_pfn;
+ end = start + cluster->numpages;
+ if (start >= max_low_pfn)
+ continue;
+ if (end > max_low_pfn)
+ end = max_low_pfn;
+ if (start < start_kernel_pfn) {
+ if (end > end_kernel_pfn
+ && end - end_kernel_pfn >= bootmap_pages) {
+ bootmap_start = end_kernel_pfn;
+ break;
+ } else if (end > start_kernel_pfn)
+ end = start_kernel_pfn;
+ } else if (start < end_kernel_pfn)
+ start = end_kernel_pfn;
+ if (end - start >= bootmap_pages) {
+ bootmap_start = start;
+ break;
+ }
+ }
+
+ if (bootmap_start == ~0UL) {
+ max_low_pfn >>= 1;
+ goto try_again;
+ }
+
+ /* Allocate the bootmap and mark the whole MM as reserved. */
+ bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
+
+ /* Mark the free regions. */
+ for_each_mem_cluster(memdesc, cluster, i) {
+ if (cluster->usage & 3)
+ continue;
+
+ start = cluster->start_pfn;
+ end = cluster->start_pfn + cluster->numpages;
+ if (start >= max_low_pfn)
+ continue;
+ if (end > max_low_pfn)
+ end = max_low_pfn;
+ if (start < start_kernel_pfn) {
+ if (end > end_kernel_pfn) {
+ free_bootmem(PFN_PHYS(start),
+ (PFN_PHYS(start_kernel_pfn)
+ - PFN_PHYS(start)));
+ printk("freeing pages %ld:%ld\n",
+ start, start_kernel_pfn);
+ start = end_kernel_pfn;
+ } else if (end > start_kernel_pfn)
+ end = start_kernel_pfn;
+ } else if (start < end_kernel_pfn)
+ start = end_kernel_pfn;
+ if (start >= end)
+ continue;
+
+ free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
+ printk("freeing pages %ld:%ld\n", start, end);
+ }
+
+ /* Reserve the bootmap memory. */
+ reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
+ printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_start = INITRD_START;
+ if (initrd_start) {
+ initrd_end = initrd_start+INITRD_SIZE;
+ printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *) initrd_start, INITRD_SIZE);
+
+ if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
+ if (!move_initrd(PFN_PHYS(max_low_pfn)))
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%p)\ndisabling initrd\n",
+ initrd_end,
+ phys_to_virt(PFN_PHYS(max_low_pfn)));
+ } else {
+ reserve_bootmem(virt_to_phys((void *)initrd_start),
+ INITRD_SIZE);
+ }
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+}
+#else
+extern void setup_memory(void *);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+int __init
+page_is_ram(unsigned long pfn)
+{
+ struct memclust_struct * cluster;
+ struct memdesc_struct * memdesc;
+ unsigned long i;
+
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
+ for_each_mem_cluster(memdesc, cluster, i)
+ {
+ if (pfn >= cluster->start_pfn &&
+ pfn < cluster->start_pfn + cluster->numpages) {
+ return (cluster->usage & 3) ? 0 : 1;
+ }
+ }
+
+ return 0;
+}
+
+#undef PFN_UP
+#undef PFN_DOWN
+#undef PFN_PHYS
+#undef PFN_MAX
+
+void __init
+setup_arch(char **cmdline_p)
+{
+ extern char _end[];
+
+ struct alpha_machine_vector *vec = NULL;
+ struct percpu_struct *cpu;
+ char *type_name, *var_name, *p;
+ void *kernel_end = _end; /* end of kernel */
+ char *args = command_line;
+
+ hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
+ boot_cpuid = hard_smp_processor_id();
+
+ /*
+ * Pre-process the system type to make sure it will be valid.
+ *
+ * This may restore real CABRIO and EB66+ family names, ie
+ * EB64+ and EB66.
+ *
+ * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
+ * and AS1200 (DIGITAL Server 5000 series) have the type as
+ * the negative of the real one.
+ */
+ if ((long)hwrpb->sys_type < 0) {
+ hwrpb->sys_type = -((long)hwrpb->sys_type);
+ hwrpb_update_checksum(hwrpb);
+ }
+
+ /* Register a call for panic conditions. */
+ notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
+
+#ifdef CONFIG_ALPHA_GENERIC
+ /* Assume that we've booted from SRM if we haven't booted from MILO.
+	   Detect the latter by looking for "MILO" in the system serial number. */
+ alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
+#endif
+
+ /* If we are using SRM, we want to allow callbacks
+ as early as possible, so do this NOW, and then
+ they should work immediately thereafter.
+ */
+ kernel_end = callback_init(kernel_end);
+
+ /*
+ * Locate the command line.
+ */
+ /* Hack for Jensen... since we're restricted to 8 or 16 chars for
+ boot flags depending on the boot mode, we need some shorthand.
+ This should do for installation. */
+ if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
+ strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
+ } else {
+ strlcpy(command_line, COMMAND_LINE, sizeof command_line);
+ }
+ strcpy(saved_command_line, command_line);
+ *cmdline_p = command_line;
+
+ /*
+ * Process command-line arguments.
+ */
+ while ((p = strsep(&args, " \t")) != NULL) {
+ if (!*p) continue;
+ if (strncmp(p, "alpha_mv=", 9) == 0) {
+ vec = get_sysvec_byname(p+9);
+ continue;
+ }
+ if (strncmp(p, "cycle=", 6) == 0) {
+ est_cycle_freq = simple_strtol(p+6, NULL, 0);
+ continue;
+ }
+ if (strncmp(p, "mem=", 4) == 0) {
+ mem_size_limit = get_mem_size_limit(p+4);
+ continue;
+ }
+ if (strncmp(p, "srmcons", 7) == 0) {
+ srmcons_output |= 1;
+ continue;
+ }
+ if (strncmp(p, "console=srm", 11) == 0) {
+ srmcons_output |= 2;
+ continue;
+ }
+ if (strncmp(p, "gartsize=", 9) == 0) {
+ alpha_agpgart_size =
+ get_mem_size_limit(p+9) << PAGE_SHIFT;
+ continue;
+ }
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (strncmp(p, "verbose_mcheck=", 15) == 0) {
+ alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
+ continue;
+ }
+#endif
+ }
+
+ /* Replace the command line, now that we've killed it with strsep. */
+ strcpy(command_line, saved_command_line);
+
+ /* If we want SRM console printk echoing early, do it now. */
+ if (alpha_using_srm && srmcons_output) {
+ register_srm_console();
+
+ /*
+ * If "console=srm" was specified, clear the srmcons_output
+ * flag now so that time.c won't unregister_srm_console
+ */
+ if (srmcons_output & 2)
+ srmcons_output = 0;
+ }
+
+#ifdef CONFIG_MAGIC_SYSRQ
+ /* If we're using SRM, make sysrq-b halt back to the prom,
+ not auto-reboot. */
+ if (alpha_using_srm) {
+ struct sysrq_key_op *op = __sysrq_get_key_op('b');
+ op->handler = (void *) machine_halt;
+ }
+#endif
+
+ /*
+ * Identify and reconfigure for the current system.
+ */
+ cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
+
+ get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
+ cpu->type, &type_name, &var_name);
+ if (*var_name == '0')
+ var_name = "";
+
+ if (!vec) {
+ vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
+ cpu->type);
+ }
+
+ if (!vec) {
+ panic("Unsupported system type: %s%s%s (%ld %ld)\n",
+ type_name, (*var_name ? " variation " : ""), var_name,
+ hwrpb->sys_type, hwrpb->sys_variation);
+ }
+ if (vec != &alpha_mv) {
+ alpha_mv = *vec;
+ }
+
+ printk("Booting "
+#ifdef CONFIG_ALPHA_GENERIC
+ "GENERIC "
+#endif
+ "on %s%s%s using machine vector %s from %s\n",
+ type_name, (*var_name ? " variation " : ""),
+ var_name, alpha_mv.vector_name,
+ (alpha_using_srm ? "SRM" : "MILO"));
+
+ printk("Major Options: "
+#ifdef CONFIG_SMP
+ "SMP "
+#endif
+#ifdef CONFIG_ALPHA_EV56
+ "EV56 "
+#endif
+#ifdef CONFIG_ALPHA_EV67
+ "EV67 "
+#endif
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+ "LEGACY_START "
+#endif
+#ifdef CONFIG_VERBOSE_MCHECK
+ "VERBOSE_MCHECK "
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+ "DISCONTIGMEM "
+#ifdef CONFIG_NUMA
+ "NUMA "
+#endif
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ "DEBUG_SPINLOCK "
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+ "MAGIC_SYSRQ "
+#endif
+ "\n");
+
+ printk("Command line: %s\n", command_line);
+
+ /*
+ * Sync up the HAE.
+ * Save the SRM's current value for restoration.
+ */
+ srm_hae = *alpha_mv.hae_register;
+ __set_hae(alpha_mv.hae_cache);
+
+ /* Reset enable correctable error reports. */
+ wrmces(0x7);
+
+ /* Find our memory. */
+ setup_memory(kernel_end);
+
+ /* First guess at cpu cache sizes. Do this before init_arch. */
+ determine_cpu_caches(cpu->type);
+
+ /* Initialize the machine. Usually has to do with setting up
+ DMA windows and the like. */
+ if (alpha_mv.init_arch)
+ alpha_mv.init_arch();
+
+ /* Reserve standard resources. */
+ reserve_std_resources();
+
+ /*
+ * Give us a default console. TGA users will see nothing until
+ * chr_dev_init is called, rather late in the boot sequence.
+ */
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+
+ /* Default root filesystem to sda2. */
+ ROOT_DEV = Root_SDA2;
+
+#ifdef CONFIG_EISA
+ /* FIXME: only set this when we actually have EISA in this box? */
+ EISA_bus = 1;
+#endif
+
+ /*
+ * Check ASN in HWRPB for validity, report if bad.
+ * FIXME: how was this failing? Should we trust it instead,
+ * and copy the value into alpha_mv.max_asn?
+ */
+
+ if (hwrpb->max_asn != MAX_ASN) {
+ printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
+ }
+
+ /*
+ * Identify the flock of penguins.
+ */
+
+#ifdef CONFIG_SMP
+ setup_smp();
+#endif
+ paging_init();
+}
+
+void __init
+disable_early_printk(void)
+{
+ if (alpha_using_srm && srmcons_output) {
+ unregister_srm_console();
+ srmcons_output = 0;
+ }
+}
+
+static char sys_unknown[] = "Unknown";
+static char systype_names[][16] = {
+ "0",
+ "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
+ "Pelican", "Morgan", "Sable", "Medulla", "Noname",
+ "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
+ "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
+ "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
+ "Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
+ "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
+};
+
+static char unofficial_names[][8] = {"100", "Ruffian"};
+
+static char api_names[][16] = {"200", "Nautilus"};
+
+static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
+static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
+
+static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
+static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
+
+static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
+static int eb64p_indices[] = {0,0,1,2};
+
+static char eb66_names[][8] = {"EB66", "EB66+"};
+static int eb66_indices[] = {0,0,1};
+
+static char marvel_names[][16] = {
+ "Marvel/EV7"
+};
+static int marvel_indices[] = { 0 };
+
+static char rawhide_names[][16] = {
+ "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
+};
+static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
+
+static char titan_names[][16] = {
+ "DEFAULT", "Privateer", "Falcon", "Granite"
+};
+static int titan_indices[] = {0,1,2,2,3};
+
+static char tsunami_names[][16] = {
+ "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
+ "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
+ "Flying Clipper", "Shark"
+};
+static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
+
+static struct alpha_machine_vector * __init
+get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
+{
+ static struct alpha_machine_vector *systype_vecs[] __initdata =
+ {
+ NULL, /* 0 */
+ NULL, /* ADU */
+ NULL, /* Cobra */
+ NULL, /* Ruby */
+ NULL, /* Flamingo */
+ NULL, /* Mannequin */
+ &jensen_mv,
+ NULL, /* Pelican */
+ NULL, /* Morgan */
+ NULL, /* Sable -- see below. */
+ NULL, /* Medulla */
+ &noname_mv,
+ NULL, /* Turbolaser */
+ &avanti_mv,
+ NULL, /* Mustang */
+ NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
+ NULL, /* Tradewind */
+ NULL, /* Mikasa -- see below. */
+ NULL, /* EB64 */
+ NULL, /* EB66 -- see variation. */
+ NULL, /* EB64+ -- see variation. */
+ &alphabook1_mv,
+ &rawhide_mv,
+ NULL, /* K2 */
+ &lynx_mv, /* Lynx */
+ &xl_mv,
+ NULL, /* EB164 -- see variation. */
+ NULL, /* Noritake -- see below. */
+ NULL, /* Cortex */
+ NULL, /* 29 */
+ &miata_mv,
+ NULL, /* XXM */
+ &takara_mv,
+ NULL, /* Yukon */
+ NULL, /* Tsunami -- see variation. */
+ &wildfire_mv, /* Wildfire */
+ NULL, /* CUSCO */
+ &eiger_mv, /* Eiger */
+ NULL, /* Titan */
+ NULL, /* Marvel */
+ };
+
+ static struct alpha_machine_vector *unofficial_vecs[] __initdata =
+ {
+ NULL, /* 100 */
+ &ruffian_mv,
+ };
+
+ static struct alpha_machine_vector *api_vecs[] __initdata =
+ {
+ NULL, /* 200 */
+ &nautilus_mv,
+ };
+
+ static struct alpha_machine_vector *alcor_vecs[] __initdata =
+ {
+ &alcor_mv, &xlt_mv, &xlt_mv
+ };
+
+ static struct alpha_machine_vector *eb164_vecs[] __initdata =
+ {
+ &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
+ };
+
+ static struct alpha_machine_vector *eb64p_vecs[] __initdata =
+ {
+ &eb64p_mv,
+ &cabriolet_mv,
+ &cabriolet_mv /* AlphaPCI64 */
+ };
+
+ static struct alpha_machine_vector *eb66_vecs[] __initdata =
+ {
+ &eb66_mv,
+ &eb66p_mv
+ };
+
+ static struct alpha_machine_vector *marvel_vecs[] __initdata =
+ {
+ &marvel_ev7_mv,
+ };
+
+ static struct alpha_machine_vector *titan_vecs[] __initdata =
+ {
+ &titan_mv, /* default */
+ &privateer_mv, /* privateer */
+ &titan_mv, /* falcon */
+ &privateer_mv, /* granite */
+ };
+
+ static struct alpha_machine_vector *tsunami_vecs[] __initdata =
+ {
+ NULL,
+ &dp264_mv, /* dp264 */
+ &dp264_mv, /* warhol */
+ &dp264_mv, /* windjammer */
+ &monet_mv, /* monet */
+ &clipper_mv, /* clipper */
+ &dp264_mv, /* goldrush */
+ &webbrick_mv, /* webbrick */
+ &dp264_mv, /* catamaran */
+ NULL, /* brisbane? */
+ NULL, /* melbourne? */
+ NULL, /* flying clipper? */
+ &shark_mv, /* shark */
+ };
+
+ /* ??? Do we need to distinguish between Rawhides? */
+
+ struct alpha_machine_vector *vec;
+
+ /* Search the system tables first... */
+ vec = NULL;
+ if (type < N(systype_vecs)) {
+ vec = systype_vecs[type];
+ } else if ((type > ST_API_BIAS) &&
+ (type - ST_API_BIAS) < N(api_vecs)) {
+ vec = api_vecs[type - ST_API_BIAS];
+ } else if ((type > ST_UNOFFICIAL_BIAS) &&
+ (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
+ vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
+ }
+
+ /* If we've not found one, try for a variation. */
+
+ if (!vec) {
+ /* Member ID is a bit-field. */
+ unsigned long member = (variation >> 10) & 0x3f;
+
+ cpu &= 0xffffffff; /* make it usable */
+
+ switch (type) {
+ case ST_DEC_ALCOR:
+ if (member < N(alcor_indices))
+ vec = alcor_vecs[alcor_indices[member]];
+ break;
+ case ST_DEC_EB164:
+ if (member < N(eb164_indices))
+ vec = eb164_vecs[eb164_indices[member]];
+			/* PC164 may show as an EB164 variation with an EV56
+			   CPU, but no true EB164 had anything but EV5... */
+ if (vec == &eb164_mv && cpu == EV56_CPU)
+ vec = &pc164_mv;
+ break;
+ case ST_DEC_EB64P:
+ if (member < N(eb64p_indices))
+ vec = eb64p_vecs[eb64p_indices[member]];
+ break;
+ case ST_DEC_EB66:
+ if (member < N(eb66_indices))
+ vec = eb66_vecs[eb66_indices[member]];
+ break;
+ case ST_DEC_MARVEL:
+ if (member < N(marvel_indices))
+ vec = marvel_vecs[marvel_indices[member]];
+ break;
+ case ST_DEC_TITAN:
+ vec = titan_vecs[0]; /* default */
+ if (member < N(titan_indices))
+ vec = titan_vecs[titan_indices[member]];
+ break;
+ case ST_DEC_TSUNAMI:
+ if (member < N(tsunami_indices))
+ vec = tsunami_vecs[tsunami_indices[member]];
+ break;
+ case ST_DEC_1000:
+ if (cpu == EV5_CPU || cpu == EV56_CPU)
+ vec = &mikasa_primo_mv;
+ else
+ vec = &mikasa_mv;
+ break;
+ case ST_DEC_NORITAKE:
+ if (cpu == EV5_CPU || cpu == EV56_CPU)
+ vec = &noritake_primo_mv;
+ else
+ vec = &noritake_mv;
+ break;
+ case ST_DEC_2100_A500:
+ if (cpu == EV5_CPU || cpu == EV56_CPU)
+ vec = &sable_gamma_mv;
+ else
+ vec = &sable_mv;
+ break;
+ }
+ }
+ return vec;
+}
+
+static struct alpha_machine_vector * __init
+get_sysvec_byname(const char *name)
+{
+ static struct alpha_machine_vector *all_vecs[] __initdata =
+ {
+ &alcor_mv,
+ &alphabook1_mv,
+ &avanti_mv,
+ &cabriolet_mv,
+ &clipper_mv,
+ &dp264_mv,
+ &eb164_mv,
+ &eb64p_mv,
+ &eb66_mv,
+ &eb66p_mv,
+ &eiger_mv,
+ &jensen_mv,
+ &lx164_mv,
+ &lynx_mv,
+ &miata_mv,
+ &mikasa_mv,
+ &mikasa_primo_mv,
+ &monet_mv,
+ &nautilus_mv,
+ &noname_mv,
+ &noritake_mv,
+ &noritake_primo_mv,
+ &p2k_mv,
+ &pc164_mv,
+ &privateer_mv,
+ &rawhide_mv,
+ &ruffian_mv,
+ &rx164_mv,
+ &sable_mv,
+ &sable_gamma_mv,
+ &shark_mv,
+ &sx164_mv,
+ &takara_mv,
+ &webbrick_mv,
+ &wildfire_mv,
+ &xl_mv,
+ &xlt_mv
+ };
+
+ size_t i;
+
+ for (i = 0; i < N(all_vecs); ++i) {
+ struct alpha_machine_vector *mv = all_vecs[i];
+ if (strcasecmp(mv->vector_name, name) == 0)
+ return mv;
+ }
+ return NULL;
+}
+
+static void
+get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
+ char **type_name, char **variation_name)
+{
+ unsigned long member;
+
+ /* If not in the tables, make it UNKNOWN,
+ else set type name to family */
+ if (type < N(systype_names)) {
+ *type_name = systype_names[type];
+ } else if ((type > ST_API_BIAS) &&
+ (type - ST_API_BIAS) < N(api_names)) {
+ *type_name = api_names[type - ST_API_BIAS];
+ } else if ((type > ST_UNOFFICIAL_BIAS) &&
+ (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) {
+ *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
+ } else {
+ *type_name = sys_unknown;
+ *variation_name = sys_unknown;
+ return;
+ }
+
+ /* Set variation to "0"; if variation is zero, done. */
+ *variation_name = systype_names[0];
+ if (variation == 0) {
+ return;
+ }
+
+ member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
+
+ cpu &= 0xffffffff; /* make it usable */
+
+ switch (type) { /* select by family */
+ default: /* default to variation "0" for now */
+ break;
+ case ST_DEC_EB164:
+ if (member < N(eb164_indices))
+ *variation_name = eb164_names[eb164_indices[member]];
+		/* PC164 may show as an EB164 variation but with an EV56 CPU;
+		   since no true EB164 had anything but EV5... */
+ if (eb164_indices[member] == 0 && cpu == EV56_CPU)
+ *variation_name = eb164_names[1]; /* make it PC164 */
+ break;
+ case ST_DEC_ALCOR:
+ if (member < N(alcor_indices))
+ *variation_name = alcor_names[alcor_indices[member]];
+ break;
+ case ST_DEC_EB64P:
+ if (member < N(eb64p_indices))
+ *variation_name = eb64p_names[eb64p_indices[member]];
+ break;
+ case ST_DEC_EB66:
+ if (member < N(eb66_indices))
+ *variation_name = eb66_names[eb66_indices[member]];
+ break;
+ case ST_DEC_MARVEL:
+ if (member < N(marvel_indices))
+ *variation_name = marvel_names[marvel_indices[member]];
+ break;
+ case ST_DEC_RAWHIDE:
+ if (member < N(rawhide_indices))
+ *variation_name = rawhide_names[rawhide_indices[member]];
+ break;
+ case ST_DEC_TITAN:
+ *variation_name = titan_names[0]; /* default */
+ if (member < N(titan_indices))
+ *variation_name = titan_names[titan_indices[member]];
+ break;
+ case ST_DEC_TSUNAMI:
+ if (member < N(tsunami_indices))
+ *variation_name = tsunami_names[tsunami_indices[member]];
+ break;
+ }
+}
+
+/*
+ * A change was made to the HWRPB via an ECO and the following code
+ * tracks a part of the ECO. In HWRPB versions less than 5, the ECO
+ * was not implemented in the console firmware. If it's revision 5 or
+ * greater we can get the name of the platform as an ASCII string from
+ * the HWRPB. That's what this function does. It checks the revision
+ * level and if the string is in the HWRPB it returns the address of
+ * the string--a pointer to the name of the platform.
+ *
+ * Returns:
+ * - Pointer to an ASCII string if it's in the HWRPB
+ * - Pointer to a static "N/A" string if the data is not in the HWRPB.
+ */
+
+static char *
+platform_string(void)
+{
+ struct dsr_struct *dsr;
+ static char unk_system_string[] = "N/A";
+
+ /* Go to the console for the string pointer.
+ * If the rpb_vers is not 5 or greater the rpb
+ * is old and does not have this data in it.
+ */
+ if (hwrpb->revision < 5)
+ return (unk_system_string);
+ else {
+ /* The Dynamic System Recognition struct
+ * has the system platform name starting
+ * after the character count of the string.
+ */
+ dsr = ((struct dsr_struct *)
+ ((char *)hwrpb + hwrpb->dsr_offset));
+ return ((char *)dsr + (dsr->sysname_off +
+ sizeof(long)));
+ }
+}
+
+static int
+get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
+{
+ struct percpu_struct *cpu;
+ unsigned long i;
+ int count = 0;
+
+ for (i = 0; i < num; i++) {
+ cpu = (struct percpu_struct *)
+ ((char *)cpubase + i*hwrpb->processor_size);
+ if ((cpu->flags & 0x1cc) == 0x1cc)
+ count++;
+ }
+ return count;
+}
+
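+/* The cache "shape" values decoded below pack three fields (see the
+   CSHAPE macro further down): the total size in bytes in the high
+   bits, log2 of the line size in bits 7:4, and the associativity in
+   bits 3:0.  A shape of -1 means no such cache; 0 means unknown.  */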
+static void
+show_cache_size (struct seq_file *f, const char *which, int shape)
+{
+ if (shape == -1)
+ seq_printf (f, "%s\t\t: n/a\n", which);
+ else if (shape == 0)
+ seq_printf (f, "%s\t\t: unknown\n", which);
+ else
+ seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
+ which, shape >> 10, shape & 15,
+ 1 << ((shape >> 4) & 15));
+}
+
+static int
+show_cpuinfo(struct seq_file *f, void *slot)
+{
+ extern struct unaligned_stat {
+ unsigned long count, va, pc;
+ } unaligned[2];
+
+ static char cpu_names[][8] = {
+ "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
+ "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
+ "EV68CX", "EV7", "EV79", "EV69"
+ };
+
+ struct percpu_struct *cpu = slot;
+ unsigned int cpu_index;
+ char *cpu_name;
+ char *systype_name;
+ char *sysvariation_name;
+ int nr_processors;
+
+ cpu_index = (unsigned) (cpu->type - 1);
+ cpu_name = "Unknown";
+ if (cpu_index < N(cpu_names))
+ cpu_name = cpu_names[cpu_index];
+
+ get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
+ cpu->type, &systype_name, &sysvariation_name);
+
+ nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
+
+ seq_printf(f, "cpu\t\t\t: Alpha\n"
+ "cpu model\t\t: %s\n"
+ "cpu variation\t\t: %ld\n"
+ "cpu revision\t\t: %ld\n"
+ "cpu serial number\t: %s\n"
+ "system type\t\t: %s\n"
+ "system variation\t: %s\n"
+ "system revision\t\t: %ld\n"
+ "system serial number\t: %s\n"
+ "cycle frequency [Hz]\t: %lu %s\n"
+ "timer frequency [Hz]\t: %lu.%02lu\n"
+ "page size [bytes]\t: %ld\n"
+ "phys. address bits\t: %ld\n"
+ "max. addr. space #\t: %ld\n"
+ "BogoMIPS\t\t: %lu.%02lu\n"
+ "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
+ "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
+ "platform string\t\t: %s\n"
+ "cpus detected\t\t: %d\n",
+ cpu_name, cpu->variation, cpu->revision,
+ (char*)cpu->serial_no,
+ systype_name, sysvariation_name, hwrpb->sys_revision,
+ (char*)hwrpb->ssn,
+ est_cycle_freq ? : hwrpb->cycle_freq,
+ est_cycle_freq ? "est." : "",
+ hwrpb->intr_freq / 4096,
+ (100 * hwrpb->intr_freq / 4096) % 100,
+ hwrpb->pagesize,
+ hwrpb->pa_bits,
+ hwrpb->max_asn,
+ loops_per_jiffy / (500000/HZ),
+ (loops_per_jiffy / (5000/HZ)) % 100,
+ unaligned[0].count, unaligned[0].pc, unaligned[0].va,
+ unaligned[1].count, unaligned[1].pc, unaligned[1].va,
+ platform_string(), nr_processors);
+
+#ifdef CONFIG_SMP
+ seq_printf(f, "cpus active\t\t: %d\n"
+ "cpu active mask\t\t: %016lx\n",
+ num_online_cpus(), cpus_addr(cpu_possible_map)[0]);
+#endif
+
+ show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
+ show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
+ show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
+ show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
+
+ return 0;
+}
+
+static int __init
+read_mem_block(int *addr, int stride, int size)
+{
+ long nloads = size / stride, cnt, tmp;
+
+ __asm__ __volatile__(
+ " rpcc %0\n"
+ "1: ldl %3,0(%2)\n"
+ " subq %1,1,%1\n"
+ /* Next two XORs introduce an explicit data dependency between
+ consecutive loads in the loop, which will give us true load
+ latency. */
+ " xor %3,%2,%2\n"
+ " xor %3,%2,%2\n"
+ " addq %2,%4,%2\n"
+ " bne %1,1b\n"
+ " rpcc %3\n"
+ " subl %3,%0,%0\n"
+ : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
+ : "r" (stride), "1" (nloads), "2" (addr));
+
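+	/* The asm above counted nloads down to zero, so recompute the
+	   load count to turn the cycle delta into an average per load.  */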
+ return cnt / (size / stride);
+}
+
+#define CSHAPE(totalsize, linesize, assoc) \
+ ((totalsize & ~0xff) | (linesize << 4) | assoc)
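+/* For example, CSHAPE(8*1024, 5, 1) == 0x2051: an 8K direct-mapped
+   cache with 32-byte (1 << 5) lines.  */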
+
+/* ??? EV5 supports up to 64M, but did the systems with more than
+ 16M of BCACHE ever exist? */
+#define MAX_BCACHE_SIZE	(16*1024*1024)
+
+/* Note that the offchip caches are direct mapped on all Alphas. */
+static int __init
+external_cache_probe(int minsize, int width)
+{
+ int cycles, prev_cycles = 1000000;
+ int stride = 1 << width;
+ long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
+
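+	/* Don't probe past the end of physical memory; clamp maxsize to
+	   the largest power-of-two block of real pages.  */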
+ if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
+ maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT);
+
+ /* Get the first block cached. */
+ read_mem_block(__va(0), stride, size);
+
+ while (size < maxsize) {
+ /* Get an average load latency in cycles. */
+ cycles = read_mem_block(__va(0), stride, size);
+ if (cycles > prev_cycles * 2) {
+ /* Fine, we exceed the cache. */
+ printk("%ldK Bcache detected; load hit latency %d "
+ "cycles, load miss latency %d cycles\n",
+ size >> 11, prev_cycles, cycles);
+ return CSHAPE(size >> 1, width, 1);
+ }
+ /* Try to get the next block cached. */
+ read_mem_block(__va(size), stride, size);
+ prev_cycles = cycles;
+ size <<= 1;
+ }
+ return -1; /* No BCACHE found. */
+}
+
+static void __init
+determine_cpu_caches (unsigned int cpu_type)
+{
+ int L1I, L1D, L2, L3;
+
+ switch (cpu_type) {
+ case EV4_CPU:
+ case EV45_CPU:
+ {
+ if (cpu_type == EV4_CPU)
+ L1I = CSHAPE(8*1024, 5, 1);
+ else
+ L1I = CSHAPE(16*1024, 5, 1);
+ L1D = L1I;
+ L3 = -1;
+
+ /* BIU_CTL is a write-only Abox register. PALcode has a
+ shadow copy, and may be available from some versions
+ of the CSERVE PALcall. If we can get it, then
+
+ unsigned long biu_ctl, size;
+ size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
+ L2 = CSHAPE (size, 5, 1);
+
+ Unfortunately, we can't rely on that.
+ */
+ L2 = external_cache_probe(128*1024, 5);
+ break;
+ }
+
+ case LCA4_CPU:
+ {
+ unsigned long car, size;
+
+ L1I = L1D = CSHAPE(8*1024, 5, 1);
+ L3 = -1;
+
+ car = *(vuip) phys_to_virt (0x120000078UL);
+ size = 64*1024 * (1 << ((car >> 5) & 7));
+ /* No typo -- 8 byte cacheline size. Whodathunk. */
+ L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
+ break;
+ }
+
+ case EV5_CPU:
+ case EV56_CPU:
+ {
+ unsigned long sc_ctl, width;
+
+ L1I = L1D = CSHAPE(8*1024, 5, 1);
+
+ /* Check the line size of the Scache. */
+ sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
+ width = sc_ctl & 0x1000 ? 6 : 5;
+ L2 = CSHAPE (96*1024, width, 3);
+
+ /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode
+ has a shadow copy, and may be available from some versions
+ of the CSERVE PALcall. If we can get it, then
+
+ unsigned long bc_control, bc_config, size;
+ size = 1024*1024 * (1 << ((bc_config & 7) - 1));
+ L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
+
+ Unfortunately, we can't rely on that.
+ */
+ L3 = external_cache_probe(1024*1024, width);
+ break;
+ }
+
+ case PCA56_CPU:
+ case PCA57_CPU:
+ {
+ unsigned long cbox_config, size;
+
+ if (cpu_type == PCA56_CPU) {
+ L1I = CSHAPE(16*1024, 6, 1);
+ L1D = CSHAPE(8*1024, 5, 1);
+ } else {
+ L1I = CSHAPE(32*1024, 6, 2);
+ L1D = CSHAPE(16*1024, 5, 1);
+ }
+ L3 = -1;
+
+ cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
+ size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
+
+#if 0
+ L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
+#else
+ L2 = external_cache_probe(512*1024, 6);
+#endif
+ break;
+ }
+
+ case EV6_CPU:
+ case EV67_CPU:
+ case EV68CB_CPU:
+ case EV68AL_CPU:
+ case EV68CX_CPU:
+ case EV69_CPU:
+ L1I = L1D = CSHAPE(64*1024, 6, 2);
+ L2 = external_cache_probe(1024*1024, 6);
+ L3 = -1;
+ break;
+
+ case EV7_CPU:
+ case EV79_CPU:
+ L1I = L1D = CSHAPE(64*1024, 6, 2);
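+		/* EV7/EV79 integrate the L2: 7MB/4 = 1.75MB,
+		   64-byte lines, 7-way.  */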
+ L2 = CSHAPE(7*1024*1024/4, 6, 7);
+ L3 = -1;
+ break;
+
+ default:
+ /* Nothing known about this cpu type. */
+ L1I = L1D = L2 = L3 = 0;
+ break;
+ }
+
+ alpha_l1i_cacheshape = L1I;
+ alpha_l1d_cacheshape = L1D;
+ alpha_l2_cacheshape = L2;
+ alpha_l3_cacheshape = L3;
+}
+
+/*
+ * We show only CPU #0 info.
+ */
+static void *
+c_start(struct seq_file *f, loff_t *pos)
+{
+ return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
+}
+
+static void *
+c_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ return NULL;
+}
+
+static void
+c_stop(struct seq_file *f, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+
+
+static int
+alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+#if 1
+ /* FIXME FIXME FIXME */
+ /* If we are using SRM and serial console, just hard halt here. */
+ if (alpha_using_srm && srmcons_output)
+ __halt();
+#endif
+ return NOTIFY_DONE;
+}
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
new file mode 100644
index 000000000000..08fe8071a7f8
--- /dev/null
+++ b/arch/alpha/kernel/signal.c
@@ -0,0 +1,672 @@
+/*
+ * linux/arch/alpha/kernel/signal.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+
+#include "proto.h"
+
+
+#define DEBUG_SIG 0
+
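+/* Everything except SIGKILL and SIGSTOP may be blocked.  */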
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage void ret_from_sys_call(void);
+static int do_signal(sigset_t *, struct pt_regs *, struct switch_stack *,
+ unsigned long, unsigned long);
+
+
+/*
+ * The OSF/1 sigprocmask calling sequence is different from the
+ * C sigprocmask() sequence..
+ *
+ * how:
+ * 1 - SIG_BLOCK
+ * 2 - SIG_UNBLOCK
+ * 3 - SIG_SETMASK
+ *
+ * We change the range to -1 .. 1 in order to let gcc easily
+ * use the conditional move instructions.
+ *
+ * Note that we don't need to acquire the kernel lock for SMP
+ * operation, as all of this is local to this thread.
+ */
+asmlinkage unsigned long
+do_osf_sigprocmask(int how, unsigned long newmask, struct pt_regs *regs)
+{
+ unsigned long oldmask = -EINVAL;
+
+ if ((unsigned long)how-1 <= 2) {
+ long sign = how-2; /* -1 .. 1 */
+ unsigned long block, unblock;
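+		/* sign < 0 is SIG_BLOCK (or the new bits in), sign == 0
+		   is SIG_UNBLOCK (clear them), and sign > 0 is
+		   SIG_SETMASK (replace the mask wholesale).  */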
+
+ newmask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ oldmask = current->blocked.sig[0];
+
+ unblock = oldmask & ~newmask;
+ block = oldmask | newmask;
+ if (!sign)
+ block = unblock;
+ if (sign <= 0)
+ newmask = block;
+ if (_NSIG_WORDS > 1 && sign > 0)
+ sigemptyset(&current->blocked);
+ current->blocked.sig[0] = newmask;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->r0 = 0; /* special no error return */
+ }
+ return oldmask;
+}
+
+asmlinkage int
+osf_sigaction(int sig, const struct osf_sigaction __user *act,
+ struct osf_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+ return -EFAULT;
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ new_ka.ka_restorer = NULL;
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage long
+sys_rt_sigaction(int sig, const struct sigaction __user *act,
+ struct sigaction __user *oact,
+ size_t sigsetsize, void __user *restorer)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (act) {
+ new_ka.ka_restorer = restorer;
+ if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
+ return -EFAULT;
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
+{
+ sigset_t oldset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ oldset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ /* Indicate EINTR on return from any possible signal handler,
+ which will not come back through here, but via sigreturn. */
+ regs->r0 = EINTR;
+ regs->r19 = 1;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&oldset, regs, sw, 0, 0))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
+ struct pt_regs *regs, struct switch_stack *sw)
+{
+ sigset_t oldset, set;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+ if (copy_from_user(&set, uset, sizeof(set)))
+ return -EFAULT;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ oldset = current->blocked;
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ /* Indicate EINTR on return from any possible signal handler,
+ which will not come back through here, but via sigreturn. */
+ regs->r0 = EINTR;
+ regs->r19 = 1;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&oldset, regs, sw, 0, 0))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+{
+ return do_sigaltstack(uss, uoss, rdusp());
+}
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+#if _NSIG_WORDS > 1
+# error "Non SA_SIGINFO frame needs rearranging"
+#endif
+
+struct sigframe
+{
+ struct sigcontext sc;
+ unsigned int retcode[3];
+};
+
+struct rt_sigframe
+{
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned int retcode[3];
+};
+
+/* If this changes, userland unwinders that Know Things about our signal
+ frame will break. Do not undertake lightly. It also implies an ABI
+ change wrt the size of siginfo_t, which may cause some pain. */
+extern char compile_time_assert
+ [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1];
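+/* If that offset ever changes, the array above gets size -1 and the
+   build fails -- a compile-time assertion with no runtime cost.  */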
+
+#define INSN_MOV_R30_R16 0x47fe0410
+#define INSN_LDI_R0 0x201f0000
+#define INSN_CALLSYS 0x00000083
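+/* These three instruction encodings, written onto the user stack by
+   setup_frame/setup_rt_frame below, form the default sigreturn
+   trampoline: mov sp,a0; ldi v0,#sigreturn; callsys.  */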
+
+static long
+restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ struct switch_stack *sw)
+{
+ unsigned long usp;
+ long i, err = __get_user(regs->pc, &sc->sc_pc);
+
+ sw->r26 = (unsigned long) ret_from_sys_call;
+
+ err |= __get_user(regs->r0, sc->sc_regs+0);
+ err |= __get_user(regs->r1, sc->sc_regs+1);
+ err |= __get_user(regs->r2, sc->sc_regs+2);
+ err |= __get_user(regs->r3, sc->sc_regs+3);
+ err |= __get_user(regs->r4, sc->sc_regs+4);
+ err |= __get_user(regs->r5, sc->sc_regs+5);
+ err |= __get_user(regs->r6, sc->sc_regs+6);
+ err |= __get_user(regs->r7, sc->sc_regs+7);
+ err |= __get_user(regs->r8, sc->sc_regs+8);
+ err |= __get_user(sw->r9, sc->sc_regs+9);
+ err |= __get_user(sw->r10, sc->sc_regs+10);
+ err |= __get_user(sw->r11, sc->sc_regs+11);
+ err |= __get_user(sw->r12, sc->sc_regs+12);
+ err |= __get_user(sw->r13, sc->sc_regs+13);
+ err |= __get_user(sw->r14, sc->sc_regs+14);
+ err |= __get_user(sw->r15, sc->sc_regs+15);
+ err |= __get_user(regs->r16, sc->sc_regs+16);
+ err |= __get_user(regs->r17, sc->sc_regs+17);
+ err |= __get_user(regs->r18, sc->sc_regs+18);
+ err |= __get_user(regs->r19, sc->sc_regs+19);
+ err |= __get_user(regs->r20, sc->sc_regs+20);
+ err |= __get_user(regs->r21, sc->sc_regs+21);
+ err |= __get_user(regs->r22, sc->sc_regs+22);
+ err |= __get_user(regs->r23, sc->sc_regs+23);
+ err |= __get_user(regs->r24, sc->sc_regs+24);
+ err |= __get_user(regs->r25, sc->sc_regs+25);
+ err |= __get_user(regs->r26, sc->sc_regs+26);
+ err |= __get_user(regs->r27, sc->sc_regs+27);
+ err |= __get_user(regs->r28, sc->sc_regs+28);
+ err |= __get_user(regs->gp, sc->sc_regs+29);
+ err |= __get_user(usp, sc->sc_regs+30);
+ wrusp(usp);
+
+ for (i = 0; i < 31; i++)
+ err |= __get_user(sw->fp[i], sc->sc_fpregs+i);
+ err |= __get_user(sw->fp[31], &sc->sc_fpcr);
+
+ return err;
+}
+
+/* Note that this syscall is also used by setcontext(3) to install
+ a given sigcontext. This because it's impossible to set *all*
+ registers and transfer control from userland. */
+
+asmlinkage void
+do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
+ struct switch_stack *sw)
+{
+ sigset_t set;
+
+ /* Verify that it's a good sigcontext before using it */
+ if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
+ goto give_sigsegv;
+ if (__get_user(set.sig[0], &sc->sc_mask))
+ goto give_sigsegv;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(sc, regs, sw))
+ goto give_sigsegv;
+
+ /* Send SIGTRAP if we're single-stepping: */
+ if (ptrace_cancel_bpt (current)) {
+ siginfo_t info;
+
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_addr = (void __user *) regs->pc;
+ info.si_trapno = 0;
+ send_sig_info(SIGTRAP, &info, current);
+ }
+ return;
+
+give_sigsegv:
+ force_sig(SIGSEGV, current);
+}
+
+asmlinkage void
+do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
+ struct switch_stack *sw)
+{
+ sigset_t set;
+
+ /* Verify that it's a good ucontext_t before using it */
+ if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc)))
+ goto give_sigsegv;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto give_sigsegv;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
+ goto give_sigsegv;
+
+ /* Send SIGTRAP if we're single-stepping: */
+ if (ptrace_cancel_bpt (current)) {
+ siginfo_t info;
+
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_addr = (void __user *) regs->pc;
+ info.si_trapno = 0;
+ send_sig_info(SIGTRAP, &info, current);
+ }
+ return;
+
+give_sigsegv:
+ force_sig(SIGSEGV, current);
+}
+
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
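+	/* Carve the frame out below sp (or the alternate stack top) and
+	   round down to a 32-byte boundary.  */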
+ return (void __user *)((sp - frame_size) & -32ul);
+}
+
+static long
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ struct switch_stack *sw, unsigned long mask, unsigned long sp)
+{
+ long i, err = 0;
+
+ err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack);
+ err |= __put_user(mask, &sc->sc_mask);
+ err |= __put_user(regs->pc, &sc->sc_pc);
+ err |= __put_user(8, &sc->sc_ps);
+
+ err |= __put_user(regs->r0 , sc->sc_regs+0);
+ err |= __put_user(regs->r1 , sc->sc_regs+1);
+ err |= __put_user(regs->r2 , sc->sc_regs+2);
+ err |= __put_user(regs->r3 , sc->sc_regs+3);
+ err |= __put_user(regs->r4 , sc->sc_regs+4);
+ err |= __put_user(regs->r5 , sc->sc_regs+5);
+ err |= __put_user(regs->r6 , sc->sc_regs+6);
+ err |= __put_user(regs->r7 , sc->sc_regs+7);
+ err |= __put_user(regs->r8 , sc->sc_regs+8);
+ err |= __put_user(sw->r9 , sc->sc_regs+9);
+ err |= __put_user(sw->r10 , sc->sc_regs+10);
+ err |= __put_user(sw->r11 , sc->sc_regs+11);
+ err |= __put_user(sw->r12 , sc->sc_regs+12);
+ err |= __put_user(sw->r13 , sc->sc_regs+13);
+ err |= __put_user(sw->r14 , sc->sc_regs+14);
+ err |= __put_user(sw->r15 , sc->sc_regs+15);
+ err |= __put_user(regs->r16, sc->sc_regs+16);
+ err |= __put_user(regs->r17, sc->sc_regs+17);
+ err |= __put_user(regs->r18, sc->sc_regs+18);
+ err |= __put_user(regs->r19, sc->sc_regs+19);
+ err |= __put_user(regs->r20, sc->sc_regs+20);
+ err |= __put_user(regs->r21, sc->sc_regs+21);
+ err |= __put_user(regs->r22, sc->sc_regs+22);
+ err |= __put_user(regs->r23, sc->sc_regs+23);
+ err |= __put_user(regs->r24, sc->sc_regs+24);
+ err |= __put_user(regs->r25, sc->sc_regs+25);
+ err |= __put_user(regs->r26, sc->sc_regs+26);
+ err |= __put_user(regs->r27, sc->sc_regs+27);
+ err |= __put_user(regs->r28, sc->sc_regs+28);
+ err |= __put_user(regs->gp , sc->sc_regs+29);
+ err |= __put_user(sp, sc->sc_regs+30);
+ err |= __put_user(0, sc->sc_regs+31);
+
+ for (i = 0; i < 31; i++)
+ err |= __put_user(sw->fp[i], sc->sc_fpregs+i);
+ err |= __put_user(0, sc->sc_fpregs+31);
+ err |= __put_user(sw->fp[31], &sc->sc_fpcr);
+
+ err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0);
+ err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1);
+ err |= __put_user(regs->trap_a2, &sc->sc_traparg_a2);
+
+ return err;
+}
+
+static void
+setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+ struct pt_regs *regs, struct switch_stack * sw)
+{
+ unsigned long oldsp, r26, err = 0;
+ struct sigframe __user *frame;
+
+ oldsp = rdusp();
+ frame = get_sigframe(ka, oldsp, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ err |= setup_sigcontext(&frame->sc, regs, sw, set->sig[0], oldsp);
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->ka_restorer) {
+ r26 = (unsigned long) ka->ka_restorer;
+ } else {
+ err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0);
+ err |= __put_user(INSN_LDI_R0+__NR_sigreturn, frame->retcode+1);
+ err |= __put_user(INSN_CALLSYS, frame->retcode+2);
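+		/* The trampoline was just written as data; imb() ensures
+		   instruction fetch sees the new code.  */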
+ imb();
+ r26 = (unsigned long) frame->retcode;
+ }
+
+ /* Check that everything was written properly. */
+ if (err)
+ goto give_sigsegv;
+
+ /* "Return" to the handler */
+ regs->r26 = r26;
+ regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler;
+ regs->r16 = sig; /* a0: signal number */
+ regs->r17 = 0; /* a1: exception code */
+ regs->r18 = (unsigned long) &frame->sc; /* a2: sigcontext pointer */
+ wrusp((unsigned long) frame);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->pc, regs->r26);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+static void
+setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs, struct switch_stack * sw)
+{
+ unsigned long oldsp, r26, err = 0;
+ struct rt_sigframe __user *frame;
+
+ oldsp = rdusp();
+ frame = get_sigframe(ka, oldsp, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(oldsp), &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, sw,
+ set->sig[0], oldsp);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->ka_restorer) {
+ r26 = (unsigned long) ka->ka_restorer;
+ } else {
+ err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0);
+ err |= __put_user(INSN_LDI_R0+__NR_rt_sigreturn,
+ frame->retcode+1);
+ err |= __put_user(INSN_CALLSYS, frame->retcode+2);
+ imb();
+ r26 = (unsigned long) frame->retcode;
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* "Return" to the handler */
+ regs->r26 = r26;
+ regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler;
+ regs->r16 = sig; /* a0: signal number */
+ regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */
+ regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */
+ wrusp((unsigned long) frame);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+ current->comm, current->pid, frame, regs->pc, regs->r26);
+#endif
+
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+}
+
+
+/*
+ * OK, we're invoking a handler.
+ */
+static inline void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs * regs, struct switch_stack *sw)
+{
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs, sw);
+ else
+ setup_frame(sig, ka, oldset, regs, sw);
+
+ if (ka->sa.sa_flags & SA_RESETHAND)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
+
+static inline void
+syscall_restart(unsigned long r0, unsigned long r19,
+ struct pt_regs *regs, struct k_sigaction *ka)
+{
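+	/* Note the interleaved switch/if: the ERESTARTNOHAND case label
+	   sits inside the ERESTARTSYS if-body, so an ERESTARTSYS without
+	   SA_RESTART falls through to the same EINTR handling.  */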
+ switch (regs->r0) {
+ case ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ case ERESTARTNOHAND:
+ regs->r0 = EINTR;
+ break;
+ }
+ /* fallthrough */
+ case ERESTARTNOINTR:
+ regs->r0 = r0; /* reset v0 and a3 and replay syscall */
+ regs->r19 = r19;
+ regs->pc -= 4;
+ break;
+ case ERESTART_RESTARTBLOCK:
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ regs->r0 = EINTR;
+ break;
+ }
+}
+
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ *
+ * "r0" and "r19" are the registers we need to restore for system call
+ * restart. "r0" is also used as an indicator whether we can restart at
+ * all (if we get here from anything but a syscall return, it will be 0)
+ */
+static int
+do_signal(sigset_t *oldset, struct pt_regs * regs, struct switch_stack * sw,
+ unsigned long r0, unsigned long r19)
+{
+ siginfo_t info;
+ int signr;
+ unsigned long single_stepping = ptrace_cancel_bpt(current);
+ struct k_sigaction ka;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ /* This lets the debugger run, ... */
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ /* ... so re-check the single stepping. */
+ single_stepping |= ptrace_cancel_bpt(current);
+
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ if (r0) syscall_restart(r0, r19, regs, &ka);
+ handle_signal(signr, &ka, &info, oldset, regs, sw);
+ if (single_stepping)
+ ptrace_set_bpt(current); /* re-set bpt */
+ return 1;
+ }
+
+ if (r0) {
+ switch (regs->r0) {
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
+ /* Reset v0 and a3 and replay syscall. */
+ regs->r0 = r0;
+ regs->r19 = r19;
+ regs->pc -= 4;
+ break;
+			/* Force v0 to the restart syscall and replay. */
+		case ERESTART_RESTARTBLOCK:
+ regs->r0 = __NR_restart_syscall;
+ regs->pc -= 4;
+ break;
+ }
+ }
+ if (single_stepping)
+ ptrace_set_bpt(current); /* re-set breakpoint */
+
+ return 0;
+}
+
+void
+do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
+ struct switch_stack *sw, unsigned long r0,
+ unsigned long r19, unsigned long thread_info_flags)
+{
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(oldset, regs, sw, r0, r19);
+}
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c
new file mode 100644
index 000000000000..fd467b207f0f
--- /dev/null
+++ b/arch/alpha/kernel/smc37c669.c
@@ -0,0 +1,2554 @@
+/*
+ * SMC 37C669 initialization code
+ */
+#include <linux/kernel.h>
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+
+#include <asm/hwrpb.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+
+#if 0
+# define DBG_DEVS(args) printk args
+#else
+# define DBG_DEVS(args)
+#endif
+
+#define KB 1024
+#define MB (1024*KB)
+#define GB (1024*MB)
+
+#define SMC_DEBUG 0
+
+/* File: smcc669_def.h
+ *
+ * Copyright (C) 1997 by
+ * Digital Equipment Corporation, Maynard, Massachusetts.
+ * All rights reserved.
+ *
+ * This software is furnished under a license and may be used and copied
+ * only in accordance of the terms of such license and with the
+ * inclusion of the above copyright notice. This software or any other
+ * copies thereof may not be provided or otherwise made available to any
+ * other person. No title to and ownership of the software is hereby
+ * transferred.
+ *
+ * The information in this software is subject to change without notice
+ * and should not be construed as a commitment by Digital Equipment
+ * Corporation.
+ *
+ * Digital assumes no responsibility for the use or reliability of its
+ * software on equipment which is not supplied by Digital.
+ *
+ *
+ * Abstract:
+ *
+ * This file contains header definitions for the SMC37c669
+ * Super I/O controller.
+ *
+ * Author:
+ *
+ * Eric Rasmussen
+ *
+ * Modification History:
+ *
+ * er 28-Jan-1997 Initial Entry
+ */
+
+#ifndef __SMC37c669_H
+#define __SMC37c669_H
+
+/*
+** Macros for handling device IRQs
+**
+** The mask acts as a flag used in mapping actual ISA IRQs (0 - 15)
+** to device IRQs (A - H).
+*/
+#define SMC37c669_DEVICE_IRQ_MASK 0x80000000
+#define SMC37c669_DEVICE_IRQ( __i ) \
+ ((SMC37c669_DEVICE_IRQ_MASK) | (__i))
+#define SMC37c669_IS_DEVICE_IRQ(__i) \
+ (((__i) & (SMC37c669_DEVICE_IRQ_MASK)) == (SMC37c669_DEVICE_IRQ_MASK))
+#define SMC37c669_RAW_DEVICE_IRQ(__i) \
+ ((__i) & ~(SMC37c669_DEVICE_IRQ_MASK))
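+
+/*
+** For example, SMC37c669_DEVICE_IRQ( 0x01 ) yields 0x80000001;
+** SMC37c669_IS_DEVICE_IRQ() tests the flag bit, and
+** SMC37c669_RAW_DEVICE_IRQ() strips it to recover 0x01.
+*/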
+
+/*
+** Macros for handling device DRQs
+**
+** The mask acts as a flag used in mapping actual ISA DMA
+** channels to device DMA channels (A - C).
+*/
+#define SMC37c669_DEVICE_DRQ_MASK 0x80000000
+#define SMC37c669_DEVICE_DRQ(__d) \
+ ((SMC37c669_DEVICE_DRQ_MASK) | (__d))
+#define SMC37c669_IS_DEVICE_DRQ(__d) \
+ (((__d) & (SMC37c669_DEVICE_DRQ_MASK)) == (SMC37c669_DEVICE_DRQ_MASK))
+#define SMC37c669_RAW_DEVICE_DRQ(__d) \
+ ((__d) & ~(SMC37c669_DEVICE_DRQ_MASK))
+
+#define SMC37c669_DEVICE_ID 0x3
+
+/*
+** SMC37c669 Device Function Definitions
+*/
+#define SERIAL_0 0
+#define SERIAL_1 1
+#define PARALLEL_0 2
+#define FLOPPY_0 3
+#define IDE_0 4
+#define NUM_FUNCS 5
+
+/*
+** Default Device Function Mappings
+*/
+#define COM1_BASE 0x3F8
+#define COM1_IRQ 4
+#define COM2_BASE 0x2F8
+#define COM2_IRQ 3
+#define PARP_BASE 0x3BC
+#define PARP_IRQ 7
+#define PARP_DRQ 3
+#define FDC_BASE 0x3F0
+#define FDC_IRQ 6
+#define FDC_DRQ 2
+
+/*
+** Configuration On/Off Key Definitions
+*/
+#define SMC37c669_CONFIG_ON_KEY 0x55
+#define SMC37c669_CONFIG_OFF_KEY 0xAA
+
+/*
+** SMC 37c669 Device IRQs
+*/
+#define SMC37c669_DEVICE_IRQ_A ( SMC37c669_DEVICE_IRQ( 0x01 ) )
+#define SMC37c669_DEVICE_IRQ_B ( SMC37c669_DEVICE_IRQ( 0x02 ) )
+#define SMC37c669_DEVICE_IRQ_C ( SMC37c669_DEVICE_IRQ( 0x03 ) )
+#define SMC37c669_DEVICE_IRQ_D ( SMC37c669_DEVICE_IRQ( 0x04 ) )
+#define SMC37c669_DEVICE_IRQ_E ( SMC37c669_DEVICE_IRQ( 0x05 ) )
+#define SMC37c669_DEVICE_IRQ_F ( SMC37c669_DEVICE_IRQ( 0x06 ) )
+/* SMC37c669_DEVICE_IRQ_G *** RESERVED ***/
+#define SMC37c669_DEVICE_IRQ_H ( SMC37c669_DEVICE_IRQ( 0x08 ) )
+
+/*
+** SMC 37c669 Device DMA Channel Definitions
+*/
+#define SMC37c669_DEVICE_DRQ_A ( SMC37c669_DEVICE_DRQ( 0x01 ) )
+#define SMC37c669_DEVICE_DRQ_B ( SMC37c669_DEVICE_DRQ( 0x02 ) )
+#define SMC37c669_DEVICE_DRQ_C ( SMC37c669_DEVICE_DRQ( 0x03 ) )
+
+/*
+** Configuration Register Index Definitions
+*/
+#define SMC37c669_CR00_INDEX 0x00
+#define SMC37c669_CR01_INDEX 0x01
+#define SMC37c669_CR02_INDEX 0x02
+#define SMC37c669_CR03_INDEX 0x03
+#define SMC37c669_CR04_INDEX 0x04
+#define SMC37c669_CR05_INDEX 0x05
+#define SMC37c669_CR06_INDEX 0x06
+#define SMC37c669_CR07_INDEX 0x07
+#define SMC37c669_CR08_INDEX 0x08
+#define SMC37c669_CR09_INDEX 0x09
+#define SMC37c669_CR0A_INDEX 0x0A
+#define SMC37c669_CR0B_INDEX 0x0B
+#define SMC37c669_CR0C_INDEX 0x0C
+#define SMC37c669_CR0D_INDEX 0x0D
+#define SMC37c669_CR0E_INDEX 0x0E
+#define SMC37c669_CR0F_INDEX 0x0F
+#define SMC37c669_CR10_INDEX 0x10
+#define SMC37c669_CR11_INDEX 0x11
+#define SMC37c669_CR12_INDEX 0x12
+#define SMC37c669_CR13_INDEX 0x13
+#define SMC37c669_CR14_INDEX 0x14
+#define SMC37c669_CR15_INDEX 0x15
+#define SMC37c669_CR16_INDEX 0x16
+#define SMC37c669_CR17_INDEX 0x17
+#define SMC37c669_CR18_INDEX 0x18
+#define SMC37c669_CR19_INDEX 0x19
+#define SMC37c669_CR1A_INDEX 0x1A
+#define SMC37c669_CR1B_INDEX 0x1B
+#define SMC37c669_CR1C_INDEX 0x1C
+#define SMC37c669_CR1D_INDEX 0x1D
+#define SMC37c669_CR1E_INDEX 0x1E
+#define SMC37c669_CR1F_INDEX 0x1F
+#define SMC37c669_CR20_INDEX 0x20
+#define SMC37c669_CR21_INDEX 0x21
+#define SMC37c669_CR22_INDEX 0x22
+#define SMC37c669_CR23_INDEX 0x23
+#define SMC37c669_CR24_INDEX 0x24
+#define SMC37c669_CR25_INDEX 0x25
+#define SMC37c669_CR26_INDEX 0x26
+#define SMC37c669_CR27_INDEX 0x27
+#define SMC37c669_CR28_INDEX 0x28
+#define SMC37c669_CR29_INDEX 0x29
+
+/*
+** Configuration Register Alias Definitions
+*/
+#define SMC37c669_DEVICE_ID_INDEX SMC37c669_CR0D_INDEX
+#define SMC37c669_DEVICE_REVISION_INDEX SMC37c669_CR0E_INDEX
+#define SMC37c669_FDC_BASE_ADDRESS_INDEX SMC37c669_CR20_INDEX
+#define SMC37c669_IDE_BASE_ADDRESS_INDEX SMC37c669_CR21_INDEX
+#define SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX SMC37c669_CR22_INDEX
+#define SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX SMC37c669_CR23_INDEX
+#define SMC37c669_SERIAL0_BASE_ADDRESS_INDEX SMC37c669_CR24_INDEX
+#define SMC37c669_SERIAL1_BASE_ADDRESS_INDEX SMC37c669_CR25_INDEX
+#define SMC37c669_PARALLEL_FDC_DRQ_INDEX SMC37c669_CR26_INDEX
+#define SMC37c669_PARALLEL_FDC_IRQ_INDEX SMC37c669_CR27_INDEX
+#define SMC37c669_SERIAL_IRQ_INDEX SMC37c669_CR28_INDEX
+
+/*
+** Configuration Register Definitions
+**
+** The INDEX (write only) and DATA (read/write) ports are effective
+** only when the chip is in the Configuration State.
+*/
+typedef struct _SMC37c669_CONFIG_REGS {
+ unsigned char index_port;
+ unsigned char data_port;
+} SMC37c669_CONFIG_REGS;
+
+/*
+** CR00 - default value 0x28
+**
+** IDE_EN (CR00<1:0>):
+** 0x - 30ua pull-ups on nIDEEN, nHDCS0, NHDCS1
+** 11 - IRQ_H available as IRQ output,
+** IRRX2, IRTX2 available as alternate IR pins
+** 10 - nIDEEN, nHDCS0, nHDCS1 used to control IDE
+**
+** VALID (CR00<7>):
+** A high level on this software controlled bit can
+** be used to indicate that a valid configuration
+** cycle has occurred. The control software must
+** take care to set this bit at the appropriate times.
+** Set to zero after power up. This bit has no
+** effect on any other hardware in the chip.
+**
+*/
+typedef union _SMC37c669_CR00 {
+ unsigned char as_uchar;
+ struct {
+ unsigned ide_en : 2; /* See note above */
+ unsigned reserved1 : 1; /* RAZ */
+ unsigned fdc_pwr : 1; /* 1 = supply power to FDC */
+ unsigned reserved2 : 3; /* Read as 010b */
+ unsigned valid : 1; /* See note above */
+ } by_field;
+} SMC37c669_CR00;
+
+/*
+** CR01 - default value 0x9C
+*/
+typedef union _SMC37c669_CR01 {
+ unsigned char as_uchar;
+ struct {
+ unsigned reserved1 : 2; /* RAZ */
+ unsigned ppt_pwr : 1; /* 1 = supply power to PPT */
+ unsigned ppt_mode : 1; /* 1 = Printer mode, 0 = EPP */
+ unsigned reserved2 : 1; /* Read as 1 */
+ unsigned reserved3 : 2; /* RAZ */
+ unsigned lock_crx: 1; /* Lock CR00 - CR18 */
+ } by_field;
+} SMC37c669_CR01;
+
+/*
+** CR02 - default value 0x88
+*/
+typedef union _SMC37c669_CR02 {
+ unsigned char as_uchar;
+ struct {
+ unsigned reserved1 : 3; /* RAZ */
+ unsigned uart1_pwr : 1; /* 1 = supply power to UART1 */
+ unsigned reserved2 : 3; /* RAZ */
+ unsigned uart2_pwr : 1; /* 1 = supply power to UART2 */
+ } by_field;
+} SMC37c669_CR02;
+
+/*
+** CR03 - default value 0x78
+**
+** CR03<7> CR03<2> Pin 94
+** ------- ------- ------
+** 0 X DRV2 (input)
+** 1 0 ADRX
+** 1 1 IRQ_B
+**
+** CR03<6> CR03<5> Op Mode
+** ------- ------- -------
+** 0 0 Model 30
+** 0 1 PS/2
+** 1 0 Reserved
+** 1 1 AT Mode
+*/
+typedef union _SMC37c669_CR03 {
+ unsigned char as_uchar;
+ struct {
+ unsigned pwrgd_gamecs : 1; /* 1 = PWRGD, 0 = GAMECS */
+ unsigned fdc_mode2 : 1; /* 1 = Enhanced Mode 2 */
+ unsigned pin94_0 : 1; /* See note above */
+ unsigned reserved1 : 1; /* RAZ */
+ unsigned drvden : 1; /* 1 = high, 0 = output */
+ unsigned op_mode : 2; /* See note above */
+ unsigned pin94_1 : 1; /* See note above */
+ } by_field;
+} SMC37c669_CR03;
+
+/*
+** CR04 - default value 0x00
+**
+** PP_EXT_MODE:
+** If CR01<PP_MODE> = 0 and PP_EXT_MODE =
+** 00 - Standard and Bidirectional
+** 01 - EPP mode and SPP
+** 10 - ECP mode
+** In this mode, 2 drives can be supported
+** directly, 3 or 4 drives must use external
+** 4 drive support. SPP can be selected
+** through the ECR register of ECP as mode 000.
+** 11 - ECP mode and EPP mode
+** In this mode, 2 drives can be supported
+** directly, 3 or 4 drives must use external
+** 4 drive support. SPP can be selected
+** through the ECR register of ECP as mode 000.
+** In this mode, EPP can be selected through
+** the ECR register of ECP as mode 100.
+**
+** PP_FDC:
+** 00 - Normal
+** 01 - PPFD1
+** 10 - PPFD2
+** 11 - Reserved
+**
+** MIDI1:
+** Serial Clock Select:
+** A low level on this bit disables MIDI support,
+** clock = divide by 13. A high level on this
+** bit enables MIDI support, clock = divide by 12.
+**
+** MIDI operates at 31.25 Kbps which can be derived
+** from 125 KHz (24 MHz / 12 = 2 MHz, 2 MHz / 16 = 125 KHz)
+**
+** ALT_IO:
+** 0 - Use pins IRRX, IRTX
+** 1 - Use pins IRRX2, IRTX2
+**
+** If this bit is set, the IR receive and transmit
+** functions will not be available on pins 25 and 26
+** unless CR00<IDE_EN> = 11.
+*/
+typedef union _SMC37c669_CR04 {
+ unsigned char as_uchar;
+ struct {
+ unsigned ppt_ext_mode : 2; /* See note above */
+ unsigned ppt_fdc : 2; /* See note above */
+ unsigned midi1 : 1; /* See note above */
+ unsigned midi2 : 1; /* See note above */
+ unsigned epp_type : 1; /* 0 = EPP 1.9, 1 = EPP 1.7 */
+ unsigned alt_io : 1; /* See note above */
+ } by_field;
+} SMC37c669_CR04;
+
+/*
+** CR05 - default value 0x00
+**
+** DEN_SEL:
+** 00 - Densel output normal
+** 01 - Reserved
+** 10 - Densel output 1
+** 11 - Densel output 0
+**
+*/
+typedef union _SMC37c669_CR05 {
+ unsigned char as_uchar;
+ struct {
+ unsigned reserved1 : 2; /* RAZ */
+ unsigned fdc_dma_mode : 1; /* 0 = burst, 1 = non-burst */
+ unsigned den_sel : 2; /* See note above */
+ unsigned swap_drv : 1; /* Swap the FDC motor selects */
+ unsigned extx4 : 1; /* 0 = 2 drive, 1 = external 4 drive decode */
+ unsigned reserved2 : 1; /* RAZ */
+ } by_field;
+} SMC37c669_CR05;
+
+/*
+** CR06 - default value 0xFF
+*/
+typedef union _SMC37c669_CR06 {
+ unsigned char as_uchar;
+ struct {
+ unsigned floppy_a : 2; /* Type of floppy drive A */
+ unsigned floppy_b : 2; /* Type of floppy drive B */
+ unsigned floppy_c : 2; /* Type of floppy drive C */
+ unsigned floppy_d : 2; /* Type of floppy drive D */
+ } by_field;
+} SMC37c669_CR06;
+
+/*
+** CR07 - default value 0x00
+**
+** Auto Power Management CR07<7:4>:
+** 0 - Auto Powerdown disabled (default)
+** 1 - Auto Powerdown enabled
+**
+** This bit is reset to the default state by POR or
+** a hardware reset.
+**
+*/
+typedef union _SMC37c669_CR07 {
+ unsigned char as_uchar;
+ struct {
+ unsigned floppy_boot : 2; /* 0 = A:, 1 = B: */
+ unsigned reserved1 : 2; /* RAZ */
+ unsigned ppt_en : 1; /* See note above */
+ unsigned uart1_en : 1; /* See note above */
+ unsigned uart2_en : 1; /* See note above */
+ unsigned fdc_en : 1; /* See note above */
+ } by_field;
+} SMC37c669_CR07;
+
+/*
+** CR08 - default value 0x00
+*/
+typedef union _SMC37c669_CR08 {
+ unsigned char as_uchar;
+ struct {
+ unsigned zero : 4; /* 0 */
+ unsigned addrx7_4 : 4; /* ADR<7:4> for ADRx decode */
+ } by_field;
+} SMC37c669_CR08;
+
+/*
+** CR09 - default value 0x00
+**
+** ADRx_CONFIG:
+** 00 - ADRx disabled
+** 01 - 1 byte decode A<3:0> = 0000b
+** 10 - 8 byte block decode A<3:0> = 0XXXb
+** 11 - 16 byte block decode A<3:0> = XXXXb
+**
+*/
+typedef union _SMC37c669_CR09 {
+ unsigned char as_uchar;
+ struct {
+ unsigned adra8 : 3; /* ADR<10:8> for ADRx decode */
+ unsigned reserved1 : 3;
+ unsigned adrx_config : 2; /* See note above */
+ } by_field;
+} SMC37c669_CR09;
+
+/*
+** CR0A - default value 0x00
+*/
+typedef union _SMC37c669_CR0A {
+ unsigned char as_uchar;
+ struct {
+ unsigned ecp_fifo_threshold : 4;
+ unsigned reserved1 : 4;
+ } by_field;
+} SMC37c669_CR0A;
+
+/*
+** CR0B - default value 0x00
+*/
+typedef union _SMC37c669_CR0B {
+ unsigned char as_uchar;
+ struct {
+ unsigned fdd0_drtx : 2; /* FDD0 Data Rate Table */
+ unsigned fdd1_drtx : 2; /* FDD1 Data Rate Table */
+ unsigned fdd2_drtx : 2; /* FDD2 Data Rate Table */
+ unsigned fdd3_drtx : 2; /* FDD3 Data Rate Table */
+ } by_field;
+} SMC37c669_CR0B;
+
+/*
+** CR0C - default value 0x00
+**
+** UART2_MODE:
+** 000 - Standard (default)
+** 001 - IrDA (HPSIR)
+** 010 - Amplitude Shift Keyed IR @500 KHz
+** 011 - Reserved
+** 1xx - Reserved
+**
+*/
+typedef union _SMC37c669_CR0C {
+ unsigned char as_uchar;
+ struct {
+ unsigned uart2_rcv_polarity : 1; /* 1 = invert RX */
+ unsigned uart2_xmit_polarity : 1; /* 1 = invert TX */
+ unsigned uart2_duplex : 1; /* 1 = full, 0 = half */
+ unsigned uart2_mode : 3; /* See note above */
+ unsigned uart1_speed : 1; /* 1 = high speed enabled */
+ unsigned uart2_speed : 1; /* 1 = high speed enabled */
+ } by_field;
+} SMC37c669_CR0C;
+
+/*
+** CR0D - default value 0x03
+**
+** Device ID Register - read only
+*/
+typedef union _SMC37c669_CR0D {
+ unsigned char as_uchar;
+ struct {
+ unsigned device_id : 8; /* Returns 0x3 in this field */
+ } by_field;
+} SMC37c669_CR0D;
+
+/*
+** CR0E - default value 0x02
+**
+** Device Revision Register - read only
+*/
+typedef union _SMC37c669_CR0E {
+ unsigned char as_uchar;
+ struct {
+ unsigned device_rev : 8; /* Returns 0x2 in this field */
+ } by_field;
+} SMC37c669_CR0E;
+
+/*
+** CR0F - default value 0x00
+*/
+typedef union _SMC37c669_CR0F {
+ unsigned char as_uchar;
+ struct {
+ unsigned test0 : 1; /* Reserved - set to 0 */
+ unsigned test1 : 1; /* Reserved - set to 0 */
+ unsigned test2 : 1; /* Reserved - set to 0 */
+ unsigned test3 : 1; /* Reserved - set to 0 */
+ unsigned test4 : 1; /* Reserved - set to 0 */
+ unsigned test5 : 1; /* Reserved - set to 0 */
+ unsigned test6 : 1; /* Reserved - set to 0 */
+ unsigned test7 : 1; /* Reserved - set to 0 */
+ } by_field;
+} SMC37c669_CR0F;
+
+/*
+** CR10 - default value 0x00
+*/
+typedef union _SMC37c669_CR10 {
+ unsigned char as_uchar;
+ struct {
+ unsigned reserved1 : 3; /* RAZ */
+ unsigned pll_gain : 1; /* 1 = 3V, 0 = 5V operation */
+ unsigned pll_stop : 1; /* 1 = stop PLLs */
+ unsigned ace_stop : 1; /* 1 = stop UART clocks */
+ unsigned pll_clock_ctrl : 1; /* 0 = 14.318 MHz, 1 = 24 MHz */
+ unsigned ir_test : 1; /* Enable IR test mode */
+ } by_field;
+} SMC37c669_CR10;
+
+/*
+** CR11 - default value 0x00
+*/
+typedef union _SMC37c669_CR11 {
+ unsigned char as_uchar;
+ struct {
+ unsigned ir_loopback : 1; /* Internal IR loop back */
+ unsigned test_10ms : 1; /* Test 10ms autopowerdown FDC timeout */
+ unsigned reserved1 : 6; /* RAZ */
+ } by_field;
+} SMC37c669_CR11;
+
+/*
+** CR12 - CR1D are reserved registers
+*/
+
+/*
+** CR1E - default value 0x80
+**
+** GAMECS:
+** 00 - GAMECS disabled
+** 01 - 1 byte decode ADR<3:0> = 0001b
+** 10 - 8 byte block decode ADR<3:0> = 0XXXb
+** 11 - 16 byte block decode ADR<3:0> = XXXXb
+**
+*/
+typedef union _SMC37c669_CR1E {
+ unsigned char as_uchar;
+ struct {
+ unsigned gamecs_config: 2; /* See note above */
+ unsigned gamecs_addr9_4 : 6; /* GAMECS Addr<9:4> */
+ } by_field;
+} SMC37c669_CR1E;
+
+/*
+** CR1F - default value 0x00
+**
+** DT0 DT1 DRVDEN0 DRVDEN1 Drive Type
+** --- --- ------- ------- ----------
+** 0 0 DENSEL DRATE0 4/2/1 MB 3.5"
+** 2/1 MB 5.25"
+** 2/1.6/1 MB 3.5" (3-mode)
+** 0 1 DRATE1 DRATE0
+** 1 0 nDENSEL DRATE0 PS/2
+** 1 1 DRATE0 DRATE1
+**
+** Note: DENSEL, DRATE1, and DRATE0 map onto two output
+** pins - DRVDEN0 and DRVDEN1.
+**
+*/
+typedef union _SMC37c669_CR1F {
+ unsigned char as_uchar;
+ struct {
+ unsigned fdd0_drive_type : 2; /* FDD0 drive type */
+ unsigned fdd1_drive_type : 2; /* FDD1 drive type */
+ unsigned fdd2_drive_type : 2; /* FDD2 drive type */
+ unsigned fdd3_drive_type : 2; /* FDD3 drive type */
+ } by_field;
+} SMC37c669_CR1F;
+
+/*
+** CR20 - default value 0x3C
+**
+** FDC Base Address Register
+** - To disable this decode set Addr<9:8> = 0
+** - A<10> = 0, A<3:0> = 0XXXb to access.
+**
+*/
+typedef union _SMC37c669_CR20 {
+ unsigned char as_uchar;
+ struct {
+ unsigned zero : 2; /* 0 */
+ unsigned addr9_4 : 6; /* FDC Addr<9:4> */
+ } by_field;
+} SMC37c669_CR20;
+
+/*
+** CR21 - default value 0x3C
+**
+** IDE Base Address Register
+** - To disable this decode set Addr<9:8> = 0
+** - A<10> = 0, A<3:0> = 0XXXb to access.
+**
+*/
+typedef union _SMC37c669_CR21 {
+ unsigned char as_uchar;
+ struct {
+ unsigned zero : 2; /* 0 */
+ unsigned addr9_4 : 6; /* IDE Addr<9:4> */
+ } by_field;
+} SMC37c669_CR21;
+
+/*
+** CR22 - default value 0x3D
+**
+** IDE Alternate Status Base Address Register
+** - To disable this decode set Addr<9:8> = 0
+** - A<10> = 0, A<3:0> = 0110b to access.
+**
+*/
+typedef union _SMC37c669_CR22 {
+ unsigned char as_uchar;
+ struct {
+ unsigned zero : 2; /* 0 */
+ unsigned addr9_4 : 6; /* IDE Alt Status Addr<9:4> */
+ } by_field;
+} SMC37c669_CR22;
+
+/*
+** CR23 - default value 0x00
+**
+** Parallel Port Base Address Register
+** - To disable this decode set Addr<9:8> = 0
+** - A<10> = 0 to access.
+** - If EPP is enabled, A<2:0> = XXXb to access.
+** If EPP is NOT enabled, A<1:0> = XXb to access
+**
+*/
+typedef union _SMC37c669_CR23 {
+ unsigned char as_uchar;
+ struct {
+ unsigned addr9_2 : 8; /* Parallel Port Addr<9:2> */
+ } by_field;
+} SMC37c669_CR23;
+
+/*
+** CR24 - default value 0x00
+**
+** UART1 Base Address Register
+** - To disable this decode set Addr<9:8> = 0
+** - A<10> = 0, A<2:0> = XXXb to access.
+**
+*/
+typedef union _SMC37c669_CR24 {
+ unsigned char as_uchar;
+ struct {
+ unsigned zero : 1; /* 0 */
+ unsigned addr9_3 : 7; /* UART1 Addr<9:3> */
+ } by_field;
+} SMC37c669_CR24;
+
+/*
+** CR25 - default value 0x00
+**
+** UART2 Base Address Register
+** - To disable this decode set Addr<9:8> = 0
+** - A<10> = 0, A<2:0> = XXXb to access.
+**
+*/
+typedef union _SMC37c669_CR25 {
+ unsigned char as_uchar;
+ struct {
+ unsigned zero : 1; /* 0 */
+ unsigned addr9_3 : 7; /* UART2 Addr<9:3> */
+ } by_field;
+} SMC37c669_CR25;
+
+/*
+** CR26 - default value 0x00
+**
+** Parallel Port / FDC DMA Select Register
+**
+** D3 - D0 DMA
+** D7 - D4 Selected
+** ------- --------
+** 0000 None
+** 0001 DMA_A
+** 0010 DMA_B
+** 0011 DMA_C
+**
+*/
+typedef union _SMC37c669_CR26 {
+ unsigned char as_uchar;
+ struct {
+ unsigned ppt_drq : 4; /* See note above */
+ unsigned fdc_drq : 4; /* See note above */
+ } by_field;
+} SMC37c669_CR26;
+
+/*
+** CR27 - default value 0x00
+**
+** Parallel Port / FDC IRQ Select Register
+**
+** D3 - D0 IRQ
+** D7 - D4 Selected
+** ------- --------
+** 0000 None
+** 0001 IRQ_A
+** 0010 IRQ_B
+** 0011 IRQ_C
+** 0100 IRQ_D
+** 0101 IRQ_E
+** 0110 IRQ_F
+** 0111 Reserved
+** 1000 IRQ_H
+**
+** Any unselected IRQ REQ is in tristate
+**
+*/
+typedef union _SMC37c669_CR27 {
+ unsigned char as_uchar;
+ struct {
+ unsigned ppt_irq : 4; /* See note above */
+ unsigned fdc_irq : 4; /* See note above */
+ } by_field;
+} SMC37c669_CR27;
+
+/*
+** CR28 - default value 0x00
+**
+** UART IRQ Select Register
+**
+** D3 - D0 IRQ
+** D7 - D4 Selected
+** ------- --------
+** 0000 None
+** 0001 IRQ_A
+** 0010 IRQ_B
+** 0011 IRQ_C
+** 0100 IRQ_D
+** 0101 IRQ_E
+** 0110 IRQ_F
+** 0111 Reserved
+** 1000 IRQ_H
+** 1111 share with UART1 (only for UART2)
+**
+** Any unselected IRQ REQ is in tristate
+**
+** To share an IRQ between UART1 and UART2, set
+** UART1 to use the desired IRQ and set UART2 to
+** 0xF to enable sharing mechanism.
+**
+*/
+typedef union _SMC37c669_CR28 {
+ unsigned char as_uchar;
+ struct {
+ unsigned uart2_irq : 4; /* See note above */
+ unsigned uart1_irq : 4; /* See note above */
+ } by_field;
+} SMC37c669_CR28;
+
+/*
+** CR29 - default value 0x00
+**
+** IRQIN IRQ Select Register
+**
+** D3 - D0 IRQ
+** D7 - D4 Selected
+** ------- --------
+** 0000 None
+** 0001 IRQ_A
+** 0010 IRQ_B
+** 0011 IRQ_C
+** 0100 IRQ_D
+** 0101 IRQ_E
+** 0110 IRQ_F
+** 0111 Reserved
+** 1000 IRQ_H
+**
+** Any unselected IRQ REQ is in tristate
+**
+*/
+typedef union _SMC37c669_CR29 {
+ unsigned char as_uchar;
+ struct {
+ unsigned irqin_irq : 4; /* See note above */
+ unsigned reserved1 : 4; /* RAZ */
+ } by_field;
+} SMC37c669_CR29;
+
+/*
+** Aliases of Configuration Register formats (should match
+** the set of index aliases).
+**
+** Note that CR24 and CR25 have the same format and are the
+** base address registers for UART1 and UART2. Because of
+** this we only define 1 alias here - for CR24 - as the serial
+** base address register.
+**
+** Note that CR21 and CR22 have the same format and are the
+** base address and alternate status address registers for
+** the IDE controller. Because of this we only define 1 alias
+** here - for CR21 - as the IDE address register.
+**
+*/
+typedef SMC37c669_CR0D SMC37c669_DEVICE_ID_REGISTER;
+typedef SMC37c669_CR0E SMC37c669_DEVICE_REVISION_REGISTER;
+typedef SMC37c669_CR20 SMC37c669_FDC_BASE_ADDRESS_REGISTER;
+typedef SMC37c669_CR21 SMC37c669_IDE_ADDRESS_REGISTER;
+typedef SMC37c669_CR23 SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER;
+typedef SMC37c669_CR24 SMC37c669_SERIAL_BASE_ADDRESS_REGISTER;
+typedef SMC37c669_CR26 SMC37c669_PARALLEL_FDC_DRQ_REGISTER;
+typedef SMC37c669_CR27 SMC37c669_PARALLEL_FDC_IRQ_REGISTER;
+typedef SMC37c669_CR28 SMC37c669_SERIAL_IRQ_REGISTER;
+
+/*
+** ISA/Device IRQ Translation Table Entry Definition
+*/
+typedef struct _SMC37c669_IRQ_TRANSLATION_ENTRY {
+ int device_irq;
+ int isa_irq;
+} SMC37c669_IRQ_TRANSLATION_ENTRY;
+
+/*
+** ISA/Device DMA Translation Table Entry Definition
+*/
+typedef struct _SMC37c669_DRQ_TRANSLATION_ENTRY {
+ int device_drq;
+ int isa_drq;
+} SMC37c669_DRQ_TRANSLATION_ENTRY;
+
+/*
+** External Interface Function Prototype Declarations
+*/
+
+SMC37c669_CONFIG_REGS *SMC37c669_detect(
+ int
+);
+
+unsigned int SMC37c669_enable_device(
+ unsigned int func
+);
+
+unsigned int SMC37c669_disable_device(
+ unsigned int func
+);
+
+unsigned int SMC37c669_configure_device(
+ unsigned int func,
+ int port,
+ int irq,
+ int drq
+);
+
+void SMC37c669_display_device_info(
+ void
+);
+
+#endif /* __SMC37c669_H */
+
+/* file: smcc669.c
+ *
+ * Copyright (C) 1997 by
+ * Digital Equipment Corporation, Maynard, Massachusetts.
+ * All rights reserved.
+ *
+ * This software is furnished under a license and may be used and copied
+ * only in accordance of the terms of such license and with the
+ * inclusion of the above copyright notice. This software or any other
+ * copies thereof may not be provided or otherwise made available to any
+ * other person. No title to and ownership of the software is hereby
+ * transferred.
+ *
+ * The information in this software is subject to change without notice
+ * and should not be construed as a commitment by digital equipment
+ * corporation.
+ *
+ * Digital assumes no responsibility for the use or reliability of its
+ * software on equipment which is not supplied by digital.
+ */
+
+/*
+ *++
+ * FACILITY:
+ *
+ * Alpha SRM Console Firmware
+ *
+ * MODULE DESCRIPTION:
+ *
+ * SMC37c669 Super I/O controller configuration routines.
+ *
+ * AUTHORS:
+ *
+ * Eric Rasmussen
+ *
+ * CREATION DATE:
+ *
+ * 28-Jan-1997
+ *
+ * MODIFICATION HISTORY:
+ *
+ * er 01-May-1997 Fixed pointer conversion errors in
+ * SMC37c669_get_device_config().
+ * er 28-Jan-1997 Initial version.
+ *
+ *--
+ */
+#if 0
+/* $INCLUDE_OPTIONS$ */
+#include "cp$inc:platform_io.h"
+/* $INCLUDE_OPTIONS_END$ */
+#include "cp$src:common.h"
+#include "cp$inc:prototypes.h"
+#include "cp$src:kernel_def.h"
+#include "cp$src:msg_def.h"
+#include "cp$src:smcc669_def.h"
+/* Platform-specific includes */
+#include "cp$src:platform.h"
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define wb( _x_, _y_ ) outb( _y_, (unsigned int)((unsigned long)_x_) )
+#define rb( _x_ ) inb( (unsigned int)((unsigned long)_x_) )
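+
+/*
+** wb()/rb() are byte write/read wrappers around outb()/inb().  The
+** SMC37c669 "register pointer" is really the chip's base I/O port
+** (0x3F0 or 0x370), so a field address converts directly to the
+** INDEX or DATA port number.
+*/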
+
+/*
+** Local storage for device configuration information.
+**
+** Since the SMC37c669 does not provide an explicit
+** mechanism for enabling/disabling individual device
+** functions, other than unmapping the device, local
+** storage for device configuration information is
+** allocated here for use in implementing our own
+** function enable/disable scheme.
+*/
+static struct DEVICE_CONFIG {
+ unsigned int port1;
+ unsigned int port2;
+ int irq;
+ int drq;
+} local_config [NUM_FUNCS];
+
+/*
+** List of all possible addresses for the Super I/O chip
+*/
+static unsigned long SMC37c669_Addresses[] __initdata =
+ {
+ 0x3F0UL, /* Primary address */
+ 0x370UL, /* Secondary address */
+ 0UL /* End of list */
+ };
+
+/*
+** Global Pointer to the Super I/O device
+*/
+static SMC37c669_CONFIG_REGS *SMC37c669 __initdata = NULL;
+
+/*
+** IRQ Translation Table
+**
+** The IRQ translation table is a list of SMC37c669 device
+** and standard ISA IRQs.
+**
+*/
+static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_table __initdata;
+
+/*
+** The following definition is for the default IRQ
+** translation table.
+*/
+static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_default_irq_table[]
+__initdata =
+ {
+ { SMC37c669_DEVICE_IRQ_A, -1 },
+ { SMC37c669_DEVICE_IRQ_B, -1 },
+ { SMC37c669_DEVICE_IRQ_C, 7 },
+ { SMC37c669_DEVICE_IRQ_D, 6 },
+ { SMC37c669_DEVICE_IRQ_E, 4 },
+ { SMC37c669_DEVICE_IRQ_F, 3 },
+ { SMC37c669_DEVICE_IRQ_H, -1 },
+ { -1, -1 } /* End of table */
+ };
+
+/*
+** The following definition is for the MONET (XP1000) IRQ
+** translation table.
+*/
+static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_monet_irq_table[]
+__initdata =
+ {
+ { SMC37c669_DEVICE_IRQ_A, -1 },
+ { SMC37c669_DEVICE_IRQ_B, -1 },
+ { SMC37c669_DEVICE_IRQ_C, 6 },
+ { SMC37c669_DEVICE_IRQ_D, 7 },
+ { SMC37c669_DEVICE_IRQ_E, 4 },
+ { SMC37c669_DEVICE_IRQ_F, 3 },
+ { SMC37c669_DEVICE_IRQ_H, -1 },
+ { -1, -1 } /* End of table */
+ };
+
+static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_tables[] __initdata =
+ {
+ SMC37c669_default_irq_table,
+ SMC37c669_monet_irq_table
+ };
+
+/*
+** DRQ Translation Table
+**
+** The DRQ translation table is a list of SMC37c669 device and
+** ISA DMA channels.
+**
+*/
+static SMC37c669_DRQ_TRANSLATION_ENTRY *SMC37c669_drq_table __initdata;
+
+/*
+** The following definition is the default DRQ
+** translation table.
+*/
+static SMC37c669_DRQ_TRANSLATION_ENTRY SMC37c669_default_drq_table[]
+__initdata =
+ {
+ { SMC37c669_DEVICE_DRQ_A, 2 },
+ { SMC37c669_DEVICE_DRQ_B, 3 },
+ { SMC37c669_DEVICE_DRQ_C, -1 },
+ { -1, -1 } /* End of table */
+ };
+
+/*
+** Local Function Prototype Declarations
+*/
+
+static unsigned int SMC37c669_is_device_enabled(
+ unsigned int func
+);
+
+#if 0
+static unsigned int SMC37c669_get_device_config(
+ unsigned int func,
+ int *port,
+ int *irq,
+ int *drq
+);
+#endif
+
+static void SMC37c669_config_mode(
+ unsigned int enable
+);
+
+static unsigned char SMC37c669_read_config(
+ unsigned char index
+);
+
+static void SMC37c669_write_config(
+ unsigned char index,
+ unsigned char data
+);
+
+static void SMC37c669_init_local_config( void );
+
+static struct DEVICE_CONFIG *SMC37c669_get_config(
+ unsigned int func
+);
+
+static int SMC37c669_xlate_irq(
+ int irq
+);
+
+static int SMC37c669_xlate_drq(
+ int drq
+);
+
+static __cacheline_aligned DEFINE_SPINLOCK(smc_lock);
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function detects the presence of an SMC37c669 Super I/O
+** controller.
+**
+** FORMAL PARAMETERS:
+**
+** None
+**
+** RETURN VALUE:
+**
+** Returns a pointer to the device if found, otherwise,
+** the NULL pointer is returned.
+**
+** SIDE EFFECTS:
+**
+** None
+**
+**--
+*/
+SMC37c669_CONFIG_REGS * __init SMC37c669_detect( int index )
+{
+ int i;
+ SMC37c669_DEVICE_ID_REGISTER id;
+
+ for ( i = 0; SMC37c669_Addresses[i] != 0; i++ ) {
+/*
+** Initialize the device pointer even though we don't yet know if
+** the controller is at this address. The support functions access
+** the controller through this device pointer so we need to set it
+** even when we are looking ...
+*/
+ SMC37c669 = ( SMC37c669_CONFIG_REGS * )SMC37c669_Addresses[i];
+/*
+** Enter configuration mode
+*/
+ SMC37c669_config_mode( TRUE );
+/*
+** Read the device id
+*/
+ id.as_uchar = SMC37c669_read_config( SMC37c669_DEVICE_ID_INDEX );
+/*
+** Exit configuration mode
+*/
+ SMC37c669_config_mode( FALSE );
+/*
+** Does the device id match? If so, assume we have found an
+** SMC37c669 controller at this address.
+*/
+ if ( id.by_field.device_id == SMC37c669_DEVICE_ID ) {
+/*
+** Initialize the IRQ and DRQ translation tables.
+*/
+ SMC37c669_irq_table = SMC37c669_irq_tables[ index ];
+ SMC37c669_drq_table = SMC37c669_default_drq_table;
+/*
+** Platform-specific IRQ/DRQ fixup hook:
+**
+** If the platform can't use the IRQ and DRQ defaults set up in this
+** file, it should call a platform-specific external routine at this
+** point to reset the IRQ and DRQ translation table pointers to point
+** at the appropriate tables for the platform. If the defaults are
+** acceptable, then the external routine should do nothing.
+*/
+
+/*
+** Put the chip back into configuration mode
+*/
+ SMC37c669_config_mode( TRUE );
+/*
+** Initialize local storage for configuration information
+*/
+ SMC37c669_init_local_config( );
+/*
+** Exit configuration mode
+*/
+ SMC37c669_config_mode( FALSE );
+/*
+** SMC37c669 controller found, break out of search loop
+*/
+ break;
+ }
+ else {
+/*
+** Otherwise, we did not find an SMC37c669 controller at this
+** address so set the device pointer to NULL.
+*/
+ SMC37c669 = NULL;
+ }
+ }
+ return SMC37c669;
+}
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function enables an SMC37c669 device function.
+**
+** FORMAL PARAMETERS:
+**
+** func:
+** Which device function to enable
+**
+** RETURN VALUE:
+**
+** Returns TRUE if the device function was enabled, otherwise, FALSE
+**
+** SIDE EFFECTS:
+**
+** The device function's mappings (port, IRQ, DRQ) are written to
+** the controller.
+**
+** DESIGN:
+**
+** Enabling a device function in the SMC37c669 controller involves
+** setting all of its mappings (port, irq, drq ...). A local
+** "shadow" copy of the device configuration is kept so we can
+** just set each mapping to what the local copy says.
+**
+** This function ALWAYS programs the device function from the local
+** shadow configuration, even if the device is already enabled. To
+** avoid replication of code, functions such as configure_device set
+** up the local copy and then call this function to update the real
+** device.
+**
+**--
+*/
+unsigned int __init SMC37c669_enable_device ( unsigned int func )
+{
+ unsigned int ret_val = FALSE;
+/*
+** Put the device into configuration mode
+*/
+ SMC37c669_config_mode( TRUE );
+ switch ( func ) {
+ case SERIAL_0:
+ {
+ SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_SERIAL_IRQ_REGISTER irq;
+/*
+** Enable the serial 1 IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
+
+ irq.by_field.uart1_irq =
+ SMC37c669_RAW_DEVICE_IRQ(
+ SMC37c669_xlate_irq( local_config[ func ].irq )
+ );
+
+ SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
+/*
+** Enable the serial 1 port base address mapping
+*/
+ base_addr.as_uchar = 0;
+ base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3;
+
+ SMC37c669_write_config(
+ SMC37c669_SERIAL0_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case SERIAL_1:
+ {
+ SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_SERIAL_IRQ_REGISTER irq;
+/*
+** Enable the serial 2 IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
+
+ irq.by_field.uart2_irq =
+ SMC37c669_RAW_DEVICE_IRQ(
+ SMC37c669_xlate_irq( local_config[ func ].irq )
+ );
+
+ SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
+/*
+** Enable the serial 2 port base address mapping
+*/
+ base_addr.as_uchar = 0;
+ base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3;
+
+ SMC37c669_write_config(
+ SMC37c669_SERIAL1_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case PARALLEL_0:
+ {
+ SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
+ SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
+/*
+** Enable the parallel port DMA channel mapping
+*/
+ drq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
+
+ drq.by_field.ppt_drq =
+ SMC37c669_RAW_DEVICE_DRQ(
+ SMC37c669_xlate_drq( local_config[ func ].drq )
+ );
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_DRQ_INDEX,
+ drq.as_uchar
+ );
+/*
+** Enable the parallel port IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
+
+ irq.by_field.ppt_irq =
+ SMC37c669_RAW_DEVICE_IRQ(
+ SMC37c669_xlate_irq( local_config[ func ].irq )
+ );
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_IRQ_INDEX,
+ irq.as_uchar
+ );
+/*
+** Enable the parallel port base address mapping
+*/
+ base_addr.as_uchar = 0;
+ base_addr.by_field.addr9_2 = local_config[ func ].port1 >> 2;
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case FLOPPY_0:
+ {
+ SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
+ SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
+/*
+** Enable the floppy controller DMA channel mapping
+*/
+ drq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
+
+ drq.by_field.fdc_drq =
+ SMC37c669_RAW_DEVICE_DRQ(
+ SMC37c669_xlate_drq( local_config[ func ].drq )
+ );
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_DRQ_INDEX,
+ drq.as_uchar
+ );
+/*
+** Enable the floppy controller IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
+
+ irq.by_field.fdc_irq =
+ SMC37c669_RAW_DEVICE_IRQ(
+ SMC37c669_xlate_irq( local_config[ func ].irq )
+ );
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_IRQ_INDEX,
+ irq.as_uchar
+ );
+/*
+** Enable the floppy controller base address mapping
+*/
+ base_addr.as_uchar = 0;
+ base_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
+
+ SMC37c669_write_config(
+ SMC37c669_FDC_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case IDE_0:
+ {
+ SMC37c669_IDE_ADDRESS_REGISTER ide_addr;
+/*
+** Enable the IDE alternate status base address mapping
+*/
+ ide_addr.as_uchar = 0;
+ ide_addr.by_field.addr9_4 = local_config[ func ].port2 >> 4;
+
+ SMC37c669_write_config(
+ SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX,
+ ide_addr.as_uchar
+ );
+/*
+** Enable the IDE controller base address mapping
+*/
+ ide_addr.as_uchar = 0;
+ ide_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4;
+
+ SMC37c669_write_config(
+ SMC37c669_IDE_BASE_ADDRESS_INDEX,
+ ide_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ }
+/*
+** Exit configuration mode and return
+*/
+ SMC37c669_config_mode( FALSE );
+
+ return ret_val;
+}
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function disables a device function within the
+** SMC37c669 Super I/O controller.
+**
+** FORMAL PARAMETERS:
+**
+** func:
+** Which function to disable
+**
+** RETURN VALUE:
+**
+** Returns TRUE if the device function was disabled, otherwise, FALSE
+**
+** SIDE EFFECTS:
+**
+** The device function's mappings are cleared in the controller.
+**
+** DESIGN:
+**
+** Disabling a function in the SMC37c669 device involves
+** disabling all the function's mappings (port, irq, drq ...).
+** A shadow copy of the device configuration is maintained
+** in local storage, so we need not worry about saving the
+** current configuration information.
+**
+**--
+*/
+unsigned int __init SMC37c669_disable_device ( unsigned int func )
+{
+ unsigned int ret_val = FALSE;
+
+/*
+** Put the device into configuration mode
+*/
+ SMC37c669_config_mode( TRUE );
+ switch ( func ) {
+ case SERIAL_0:
+ {
+ SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_SERIAL_IRQ_REGISTER irq;
+/*
+** Disable the serial 1 IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
+
+ irq.by_field.uart1_irq = 0;
+
+ SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
+/*
+** Disable the serial 1 port base address mapping
+*/
+ base_addr.as_uchar = 0;
+ SMC37c669_write_config(
+ SMC37c669_SERIAL0_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case SERIAL_1:
+ {
+ SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_SERIAL_IRQ_REGISTER irq;
+/*
+** Disable the serial 2 IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
+
+ irq.by_field.uart2_irq = 0;
+
+ SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar );
+/*
+** Disable the serial 2 port base address mapping
+*/
+ base_addr.as_uchar = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_SERIAL1_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case PARALLEL_0:
+ {
+ SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
+ SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
+/*
+** Disable the parallel port DMA channel mapping
+*/
+ drq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
+
+ drq.by_field.ppt_drq = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_DRQ_INDEX,
+ drq.as_uchar
+ );
+/*
+** Disable the parallel port IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
+
+ irq.by_field.ppt_irq = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_IRQ_INDEX,
+ irq.as_uchar
+ );
+/*
+** Disable the parallel port base address mapping
+*/
+ base_addr.as_uchar = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case FLOPPY_0:
+ {
+ SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr;
+ SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq;
+ SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq;
+/*
+** Disable the floppy controller DMA channel mapping
+*/
+ drq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
+
+ drq.by_field.fdc_drq = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_DRQ_INDEX,
+ drq.as_uchar
+ );
+/*
+** Disable the floppy controller IRQ mapping
+*/
+ irq.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
+
+ irq.by_field.fdc_irq = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_PARALLEL_FDC_IRQ_INDEX,
+ irq.as_uchar
+ );
+/*
+** Disable the floppy controller base address mapping
+*/
+ base_addr.as_uchar = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_FDC_BASE_ADDRESS_INDEX,
+ base_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ case IDE_0:
+ {
+ SMC37c669_IDE_ADDRESS_REGISTER ide_addr;
+/*
+** Disable the IDE alternate status base address mapping
+*/
+ ide_addr.as_uchar = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX,
+ ide_addr.as_uchar
+ );
+/*
+** Disable the IDE controller base address mapping
+*/
+ ide_addr.as_uchar = 0;
+
+ SMC37c669_write_config(
+ SMC37c669_IDE_BASE_ADDRESS_INDEX,
+ ide_addr.as_uchar
+ );
+ ret_val = TRUE;
+ break;
+ }
+ }
+/*
+** Exit configuration mode and return
+*/
+ SMC37c669_config_mode( FALSE );
+
+ return ret_val;
+}
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function configures a device function within the
+** SMC37c669 Super I/O controller.
+**
+** FORMAL PARAMETERS:
+**
+** func:
+** Which device function
+**
+** port:
+** I/O port for the function to use
+**
+** irq:
+** IRQ for the device function to use
+**
+** drq:
+** DMA channel for the device function to use
+**
+** RETURN VALUE:
+**
+** Returns TRUE if the device function was configured,
+** otherwise, FALSE.
+**
+** SIDE EFFECTS:
+**
+** The local shadow copy of the device configuration is updated.
+**
+** DESIGN:
+**
+** If this function returns TRUE, the local shadow copy of
+** the configuration is also updated. If the device function
+** is currently disabled, only the local shadow copy is
+** updated and the actual device function will be updated
+** if/when it is enabled.
+**
+**--
+*/
+unsigned int __init SMC37c669_configure_device (
+ unsigned int func,
+ int port,
+ int irq,
+ int drq )
+{
+ struct DEVICE_CONFIG *cp;
+
+/*
+** Check for a valid configuration
+*/
+ if ( ( cp = SMC37c669_get_config ( func ) ) != NULL ) {
+/*
+** Configuration is valid, update the local shadow copy
+*/
+ if ( ( drq & ~0xFF ) == 0 ) {
+ cp->drq = drq;
+ }
+ if ( ( irq & ~0xFF ) == 0 ) {
+ cp->irq = irq;
+ }
+ if ( ( port & ~0xFFFF ) == 0 ) {
+ cp->port1 = port;
+ }
+/*
+** If the device function is enabled, update the actual
+** device configuration.
+*/
+ if ( SMC37c669_is_device_enabled( func ) ) {
+ SMC37c669_enable_device( func );
+ }
+ return TRUE;
+ }
+ return FALSE;
+}
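+
+/*
+** As an illustrative sketch of the write-through behavior described
+** above: if serial port 1 is currently enabled, then
+**
+**     SMC37c669_configure_device( SERIAL_0, 0x3F8, 4, -1 );
+**
+** updates the shadow copy and immediately reprograms the chip with
+** the new port and IRQ, while the -1 DMA channel fails the range
+** check and leaves the shadow drq untouched.
+*/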
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function determines whether a device function
+** within the SMC37c669 controller is enabled.
+**
+** FORMAL PARAMETERS:
+**
+** func:
+** Which device function
+**
+** RETURN VALUE:
+**
+** Returns TRUE if the device function is enabled, otherwise, FALSE
+**
+** SIDE EFFECTS:
+**
+** The controller is temporarily placed in configuration mode.
+**
+** DESIGN:
+**
+** To check whether a device is enabled we will only look at
+** the port base address mapping. According to the SMC37c669
+** specification, all of the port base address mappings are
+** disabled if the addr<9:8> (bits <7:6> of the register) are
+** zero.
+**
+**--
+*/
+static unsigned int __init SMC37c669_is_device_enabled ( unsigned int func )
+{
+ unsigned char base_addr = 0;
+ unsigned int dev_ok = FALSE;
+ unsigned int ret_val = FALSE;
+/*
+** Enter configuration mode
+*/
+ SMC37c669_config_mode( TRUE );
+
+ switch ( func ) {
+ case SERIAL_0:
+ base_addr =
+ SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX );
+ dev_ok = TRUE;
+ break;
+ case SERIAL_1:
+ base_addr =
+ SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX );
+ dev_ok = TRUE;
+ break;
+ case PARALLEL_0:
+ base_addr =
+ SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX );
+ dev_ok = TRUE;
+ break;
+ case FLOPPY_0:
+ base_addr =
+ SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX );
+ dev_ok = TRUE;
+ break;
+ case IDE_0:
+ base_addr =
+ SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX );
+ dev_ok = TRUE;
+ break;
+ }
+/*
+** If we have a valid device, check base_addr<7:6> to see if the
+** device is enabled (mapped).
+*/
+ if ( ( dev_ok ) && ( ( base_addr & 0xC0 ) != 0 ) ) {
+/*
+** The mapping is not disabled, so assume that the function is
+** enabled.
+*/
+ ret_val = TRUE;
+ }
+/*
+** Exit configuration mode
+*/
+ SMC37c669_config_mode( FALSE );
+
+ return ret_val;
+}
+
+
+#if 0
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function retrieves the configuration information of a
+** device function within the SMC37c669 Super I/O controller.
+**
+** FORMAL PARAMETERS:
+**
+** func:
+** Which device function
+**
+** port:
+** I/O port returned
+**
+** irq:
+** IRQ returned
+**
+** drq:
+** DMA channel returned
+**
+** RETURN VALUE:
+**
+** Returns TRUE if the device configuration was successfully
+** retrieved, otherwise, FALSE.
+**
+** SIDE EFFECTS:
+**
+** The data pointed to by the port, irq, and drq parameters
+** may be modified even if the configuration is not successfully
+** retrieved.
+**
+** DESIGN:
+**
+** The device configuration is fetched from the local shadow
+** copy. Any unused configuration values will be set to -1. Any
+** parameter which is not desired may be passed as the NULL
+** pointer.
+**
+**--
+*/
+static unsigned int __init SMC37c669_get_device_config (
+ unsigned int func,
+ int *port,
+ int *irq,
+ int *drq )
+{
+ struct DEVICE_CONFIG *cp;
+ unsigned int ret_val = FALSE;
+/*
+** Check for a valid device configuration
+*/
+ if ( ( cp = SMC37c669_get_config( func ) ) != NULL ) {
+ if ( drq != NULL ) {
+ *drq = cp->drq;
+ ret_val = TRUE;
+ }
+ if ( irq != NULL ) {
+ *irq = cp->irq;
+ ret_val = TRUE;
+ }
+ if ( port != NULL ) {
+ *port = cp->port1;
+ ret_val = TRUE;
+ }
+ }
+ return ret_val;
+}
+#endif
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function displays the current state of the SMC37c669
+** Super I/O controller's device functions.
+**
+** FORMAL PARAMETERS:
+**
+** None
+**
+** RETURN VALUE:
+**
+** None
+**
+** SIDE EFFECTS:
+**
+** None
+**
+**--
+*/
+void __init SMC37c669_display_device_info ( void )
+{
+ if ( SMC37c669_is_device_enabled( SERIAL_0 ) ) {
+ printk( " Serial 0: Enabled [ Port 0x%x, IRQ %d ]\n",
+ local_config[ SERIAL_0 ].port1,
+ local_config[ SERIAL_0 ].irq
+ );
+ }
+ else {
+ printk( " Serial 0: Disabled\n" );
+ }
+
+ if ( SMC37c669_is_device_enabled( SERIAL_1 ) ) {
+ printk( " Serial 1: Enabled [ Port 0x%x, IRQ %d ]\n",
+ local_config[ SERIAL_1 ].port1,
+ local_config[ SERIAL_1 ].irq
+ );
+ }
+ else {
+ printk( " Serial 1: Disabled\n" );
+ }
+
+ if ( SMC37c669_is_device_enabled( PARALLEL_0 ) ) {
+ printk( " Parallel: Enabled [ Port 0x%x, IRQ %d/%d ]\n",
+ local_config[ PARALLEL_0 ].port1,
+ local_config[ PARALLEL_0 ].irq,
+ local_config[ PARALLEL_0 ].drq
+ );
+ }
+ else {
+ printk( " Parallel: Disabled\n" );
+ }
+
+ if ( SMC37c669_is_device_enabled( FLOPPY_0 ) ) {
+ printk( " Floppy Ctrl: Enabled [ Port 0x%x, IRQ %d/%d ]\n",
+ local_config[ FLOPPY_0 ].port1,
+ local_config[ FLOPPY_0 ].irq,
+ local_config[ FLOPPY_0 ].drq
+ );
+ }
+ else {
+ printk( " Floppy Ctrl: Disabled\n" );
+ }
+
+ if ( SMC37c669_is_device_enabled( IDE_0 ) ) {
+ printk( " IDE 0: Enabled [ Port 0x%x, IRQ %d ]\n",
+ local_config[ IDE_0 ].port1,
+ local_config[ IDE_0 ].irq
+ );
+ }
+ else {
+ printk( " IDE 0: Disabled\n" );
+ }
+}
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function puts the SMC37c669 Super I/O controller into,
+** and takes it out of, configuration mode.
+**
+** FORMAL PARAMETERS:
+**
+** enable:
+** TRUE to enter configuration mode, FALSE to exit.
+**
+** RETURN VALUE:
+**
+** None
+**
+** SIDE EFFECTS:
+**
+** The SMC37c669 controller may be left in configuration mode.
+**
+**--
+*/
+static void __init SMC37c669_config_mode(
+ unsigned int enable )
+{
+ if ( enable ) {
+/*
+** To enter configuration mode, two writes in succession to the index
+** port are required. If a write to another address or port occurs
+** between these two writes, the chip does not enter configuration
+** mode. Therefore, a spinlock is placed around the two writes to
+** guarantee that they complete uninterrupted.
+*/
+ spin_lock(&smc_lock);
+ wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
+ wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
+ spin_unlock(&smc_lock);
+ }
+ else {
+ wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY );
+ }
+}
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function reads an SMC37c669 Super I/O controller
+** configuration register. This function assumes that the
+** device is already in configuration mode.
+**
+** FORMAL PARAMETERS:
+**
+** index:
+** Index value of configuration register to read
+**
+** RETURN VALUE:
+**
+** Data read from configuration register
+**
+** SIDE EFFECTS:
+**
+** None
+**
+**--
+*/
+static unsigned char __init SMC37c669_read_config(
+ unsigned char index )
+{
+ unsigned char data;
+
+ wb( &SMC37c669->index_port, index );
+ data = rb( &SMC37c669->data_port );
+ return data;
+}
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function writes an SMC37c669 Super I/O controller
+** configuration register. This function assumes that the
+** device is already in configuration mode.
+**
+** FORMAL PARAMETERS:
+**
+** index:
+** Index of configuration register to write
+**
+** data:
+** Data to be written
+**
+** RETURN VALUE:
+**
+** None
+**
+** SIDE EFFECTS:
+**
+** None
+**
+**--
+*/
+static void __init SMC37c669_write_config(
+ unsigned char index,
+ unsigned char data )
+{
+ wb( &SMC37c669->index_port, index );
+ wb( &SMC37c669->data_port, data );
+}
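+
+/*
+** Taken together, the helpers above support the usual
+** read-modify-write access pattern. A minimal sketch (index, reg,
+** and new_bits are illustrative):
+**
+**     SMC37c669_config_mode( TRUE );
+**     reg = SMC37c669_read_config( index );
+**     reg |= new_bits;
+**     SMC37c669_write_config( index, reg );
+**     SMC37c669_config_mode( FALSE );
+*/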
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function initializes the local device
+** configuration storage. This function assumes
+** that the device is already in configuration
+** mode.
+**
+** FORMAL PARAMETERS:
+**
+** None
+**
+** RETURN VALUE:
+**
+** None
+**
+** SIDE EFFECTS:
+**
+** Local storage for device configuration information
+** is initialized.
+**
+**--
+*/
+static void __init SMC37c669_init_local_config ( void )
+{
+ SMC37c669_SERIAL_BASE_ADDRESS_REGISTER uart_base;
+ SMC37c669_SERIAL_IRQ_REGISTER uart_irqs;
+ SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER ppt_base;
+ SMC37c669_PARALLEL_FDC_IRQ_REGISTER ppt_fdc_irqs;
+ SMC37c669_PARALLEL_FDC_DRQ_REGISTER ppt_fdc_drqs;
+ SMC37c669_FDC_BASE_ADDRESS_REGISTER fdc_base;
+ SMC37c669_IDE_ADDRESS_REGISTER ide_base;
+ SMC37c669_IDE_ADDRESS_REGISTER ide_alt;
+
+/*
+** Get serial port 1 base address
+*/
+ uart_base.as_uchar =
+ SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX );
+/*
+** Get IRQs for serial ports 1 & 2
+*/
+ uart_irqs.as_uchar =
+ SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX );
+/*
+** Store local configuration information for serial port 1
+*/
+ local_config[SERIAL_0].port1 = uart_base.by_field.addr9_3 << 3;
+ local_config[SERIAL_0].irq =
+ SMC37c669_xlate_irq(
+ SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart1_irq )
+ );
+/*
+** Get serial port 2 base address
+*/
+ uart_base.as_uchar =
+ SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX );
+/*
+** Store local configuration information for serial port 2
+*/
+ local_config[SERIAL_1].port1 = uart_base.by_field.addr9_3 << 3;
+ local_config[SERIAL_1].irq =
+ SMC37c669_xlate_irq(
+ SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart2_irq )
+ );
+/*
+** Get parallel port base address
+*/
+ ppt_base.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX );
+/*
+** Get IRQs for parallel port and floppy controller
+*/
+ ppt_fdc_irqs.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX );
+/*
+** Get DRQs for parallel port and floppy controller
+*/
+ ppt_fdc_drqs.as_uchar =
+ SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX );
+/*
+** Store local configuration information for parallel port
+*/
+ local_config[PARALLEL_0].port1 = ppt_base.by_field.addr9_2 << 2;
+ local_config[PARALLEL_0].irq =
+ SMC37c669_xlate_irq(
+ SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.ppt_irq )
+ );
+ local_config[PARALLEL_0].drq =
+ SMC37c669_xlate_drq(
+ SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.ppt_drq )
+ );
+/*
+** Get floppy controller base address
+*/
+ fdc_base.as_uchar =
+ SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX );
+/*
+** Store local configuration information for floppy controller
+*/
+ local_config[FLOPPY_0].port1 = fdc_base.by_field.addr9_4 << 4;
+ local_config[FLOPPY_0].irq =
+ SMC37c669_xlate_irq(
+ SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.fdc_irq )
+ );
+ local_config[FLOPPY_0].drq =
+ SMC37c669_xlate_drq(
+ SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.fdc_drq )
+ );
+/*
+** Get IDE controller base address
+*/
+ ide_base.as_uchar =
+ SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX );
+/*
+** Get IDE alternate status base address
+*/
+ ide_alt.as_uchar =
+ SMC37c669_read_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX );
+/*
+** Store local configuration information for IDE controller
+*/
+ local_config[IDE_0].port1 = ide_base.by_field.addr9_4 << 4;
+ local_config[IDE_0].port2 = ide_alt.by_field.addr9_4 << 4;
+ local_config[IDE_0].irq = 14;
+}
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function returns a pointer to the local shadow
+** configuration of the requested device function.
+**
+** FORMAL PARAMETERS:
+**
+** func:
+** Which device function
+**
+** RETURN VALUE:
+**
+** Returns a pointer to the DEVICE_CONFIG structure for the
+** requested function, otherwise, NULL.
+**
+** SIDE EFFECTS:
+**
+** None
+**
+**--
+*/
+static struct DEVICE_CONFIG * __init SMC37c669_get_config( unsigned int func )
+{
+ struct DEVICE_CONFIG *cp = NULL;
+
+ switch ( func ) {
+ case SERIAL_0:
+ cp = &local_config[ SERIAL_0 ];
+ break;
+ case SERIAL_1:
+ cp = &local_config[ SERIAL_1 ];
+ break;
+ case PARALLEL_0:
+ cp = &local_config[ PARALLEL_0 ];
+ break;
+ case FLOPPY_0:
+ cp = &local_config[ FLOPPY_0 ];
+ break;
+ case IDE_0:
+ cp = &local_config[ IDE_0 ];
+ break;
+ }
+ return cp;
+}
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function translates IRQs back and forth between ISA
+** IRQs and SMC37c669 device IRQs.
+**
+** FORMAL PARAMETERS:
+**
+** irq:
+** The IRQ to translate
+**
+** RETURN VALUE:
+**
+** Returns the translated IRQ, otherwise, returns -1.
+**
+** SIDE EFFECTS:
+**
+** None
+**
+**--
+*/
+static int __init SMC37c669_xlate_irq ( int irq )
+{
+ int i, translated_irq = -1;
+
+ if ( SMC37c669_IS_DEVICE_IRQ( irq ) ) {
+/*
+** We are translating a device IRQ to an ISA IRQ
+*/
+ for ( i = 0; ( SMC37c669_irq_table[i].device_irq != -1 ) || ( SMC37c669_irq_table[i].isa_irq != -1 ); i++ ) {
+ if ( irq == SMC37c669_irq_table[i].device_irq ) {
+ translated_irq = SMC37c669_irq_table[i].isa_irq;
+ break;
+ }
+ }
+ }
+ else {
+/*
+** We are translating an ISA IRQ to a device IRQ
+*/
+ for ( i = 0; ( SMC37c669_irq_table[i].isa_irq != -1 ) || ( SMC37c669_irq_table[i].device_irq != -1 ); i++ ) {
+ if ( irq == SMC37c669_irq_table[i].isa_irq ) {
+ translated_irq = SMC37c669_irq_table[i].device_irq;
+ break;
+ }
+ }
+ }
+ return translated_irq;
+}
+
+
+/*
+**++
+** FUNCTIONAL DESCRIPTION:
+**
+** This function translates DMA channels back and forth between
+** ISA DMA channels and SMC37c669 device DMA channels.
+**
+** FORMAL PARAMETERS:
+**
+** drq:
+** The DMA channel to translate
+**
+** RETURN VALUE:
+**
+** Returns the translated DMA channel, otherwise, returns -1
+**
+** SIDE EFFECTS:
+**
+** None
+**
+**--
+*/
+static int __init SMC37c669_xlate_drq ( int drq )
+{
+ int i, translated_drq = -1;
+
+ if ( SMC37c669_IS_DEVICE_DRQ( drq ) ) {
+/*
+** We are translating a device DMA channel to an ISA DMA channel
+*/
+ for ( i = 0; ( SMC37c669_drq_table[i].device_drq != -1 ) || ( SMC37c669_drq_table[i].isa_drq != -1 ); i++ ) {
+ if ( drq == SMC37c669_drq_table[i].device_drq ) {
+ translated_drq = SMC37c669_drq_table[i].isa_drq;
+ break;
+ }
+ }
+ }
+ else {
+/*
+** We are translating an ISA DMA channel to a device DMA channel
+*/
+ for ( i = 0; ( SMC37c669_drq_table[i].isa_drq != -1 ) || ( SMC37c669_drq_table[i].device_drq != -1 ); i++ ) {
+ if ( drq == SMC37c669_drq_table[i].isa_drq ) {
+ translated_drq = SMC37c669_drq_table[i].device_drq;
+ break;
+ }
+ }
+ }
+ return translated_drq;
+}
+
+#if 0
+int __init smcc669_init ( void )
+{
+ struct INODE *ip;
+
+ allocinode( smc_ddb.name, 1, &ip );
+ ip->dva = &smc_ddb;
+ ip->attr = ATTR$M_WRITE | ATTR$M_READ;
+ ip->len[0] = 0x30;
+ ip->misc = 0;
+ INODE_UNLOCK( ip );
+
+ return msg_success;
+}
+
+int __init smcc669_open( struct FILE *fp, char *info, char *next, char *mode )
+{
+ struct INODE *ip;
+/*
+** Allow multiple readers but only one writer. ip->misc keeps track
+** of the number of writers
+*/
+ ip = fp->ip;
+ INODE_LOCK( ip );
+ if ( fp->mode & ATTR$M_WRITE ) {
+ if ( ip->misc ) {
+ INODE_UNLOCK( ip );
+ return msg_failure; /* too many writers */
+ }
+ ip->misc++;
+ }
+/*
+** Treat the information field as a byte offset
+*/
+ *fp->offset = xtoi( info );
+ INODE_UNLOCK( ip );
+
+ return msg_success;
+}
+
+int __init smcc669_close( struct FILE *fp )
+{
+ struct INODE *ip;
+
+ ip = fp->ip;
+ if ( fp->mode & ATTR$M_WRITE ) {
+ INODE_LOCK( ip );
+ ip->misc--;
+ INODE_UNLOCK( ip );
+ }
+ return msg_success;
+}
+
+int __init smcc669_read( struct FILE *fp, int size, int number, unsigned char *buf )
+{
+ int i;
+ int length;
+ int nbytes;
+ struct INODE *ip;
+
+/*
+** Always access a byte at a time
+*/
+ ip = fp->ip;
+ length = size * number;
+ nbytes = 0;
+
+ SMC37c669_config_mode( TRUE );
+ for ( i = 0; i < length; i++ ) {
+ if ( !inrange( *fp->offset, 0, ip->len[0] ) )
+ break;
+ *buf++ = SMC37c669_read_config( *fp->offset );
+ *fp->offset += 1;
+ nbytes++;
+ }
+ SMC37c669_config_mode( FALSE );
+ return nbytes;
+}
+
+int __init smcc669_write( struct FILE *fp, int size, int number, unsigned char *buf )
+{
+ int i;
+ int length;
+ int nbytes;
+ struct INODE *ip;
+/*
+** Always access a byte at a time
+*/
+ ip = fp->ip;
+ length = size * number;
+ nbytes = 0;
+
+ SMC37c669_config_mode( TRUE );
+ for ( i = 0; i < length; i++ ) {
+ if ( !inrange( *fp->offset, 0, ip->len[0] ) )
+ break;
+ SMC37c669_write_config( *fp->offset, *buf );
+ *fp->offset += 1;
+ buf++;
+ nbytes++;
+ }
+ SMC37c669_config_mode( FALSE );
+ return nbytes;
+}
+#endif
+
+void __init
+SMC37c669_dump_registers(void)
+{
+ int i;
+ for (i = 0; i <= 0x29; i++)
+ printk("-- CR%02x : %02x\n", i, SMC37c669_read_config(i));
+}
+/*+
+ * ============================================================================
+ * = SMC669_Init - SMC37c669 Super I/O controller initialization =
+ * ============================================================================
+ *
+ * OVERVIEW:
+ *
+ * This routine configures and enables device functions on the
+ * SMC37c669 Super I/O controller.
+ *
+ * FORM OF CALL:
+ *
+ * SMC669_Init( index );
+ *
+ * RETURNS:
+ *
+ * Nothing
+ *
+ * ARGUMENTS:
+ *
+ * index - which IRQ translation table to use
+ *
+ * SIDE EFFECTS:
+ *
+ * Device functions on the controller are configured and enabled.
+ *
+ */
+void __init SMC669_Init ( int index )
+{
+ SMC37c669_CONFIG_REGS *SMC_base;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if ( ( SMC_base = SMC37c669_detect( index ) ) != NULL ) {
+#if SMC_DEBUG
+ SMC37c669_config_mode( TRUE );
+ SMC37c669_dump_registers( );
+ SMC37c669_config_mode( FALSE );
+ SMC37c669_display_device_info( );
+#endif
+ SMC37c669_disable_device( SERIAL_0 );
+ SMC37c669_configure_device(
+ SERIAL_0,
+ COM1_BASE,
+ COM1_IRQ,
+ -1
+ );
+ SMC37c669_enable_device( SERIAL_0 );
+
+ SMC37c669_disable_device( SERIAL_1 );
+ SMC37c669_configure_device(
+ SERIAL_1,
+ COM2_BASE,
+ COM2_IRQ,
+ -1
+ );
+ SMC37c669_enable_device( SERIAL_1 );
+
+ SMC37c669_disable_device( PARALLEL_0 );
+ SMC37c669_configure_device(
+ PARALLEL_0,
+ PARP_BASE,
+ PARP_IRQ,
+ PARP_DRQ
+ );
+ SMC37c669_enable_device( PARALLEL_0 );
+
+ SMC37c669_disable_device( FLOPPY_0 );
+ SMC37c669_configure_device(
+ FLOPPY_0,
+ FDC_BASE,
+ FDC_IRQ,
+ FDC_DRQ
+ );
+ SMC37c669_enable_device( FLOPPY_0 );
+
+	/* Wake up the sometimes-forgotten floppy, especially on DP264. */
+ outb(0xc, 0x3f2);
+
+ SMC37c669_disable_device( IDE_0 );
+
+#if SMC_DEBUG
+ SMC37c669_config_mode( TRUE );
+ SMC37c669_dump_registers( );
+ SMC37c669_config_mode( FALSE );
+ SMC37c669_display_device_info( );
+#endif
+ local_irq_restore(flags);
+ printk( "SMC37c669 Super I/O Controller found @ 0x%lx\n",
+ (unsigned long) SMC_base );
+ }
+ else {
+ local_irq_restore(flags);
+#if SMC_DEBUG
+ printk( "No SMC37c669 Super I/O Controller found\n" );
+#endif
+ }
+}
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
new file mode 100644
index 000000000000..421e51ea6bb7
--- /dev/null
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -0,0 +1,277 @@
+/*
+ * SMC 37C93X initialization code
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/hwrpb.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+
+#define SMC_DEBUG 0
+
+#if SMC_DEBUG
+# define DBG_DEVS(args) printk args
+#else
+# define DBG_DEVS(args)
+#endif
+
+#define KB 1024
+#define MB (1024*KB)
+#define GB (1024*MB)
+
+/* device "activate" register contents */
+#define DEVICE_ON 1
+#define DEVICE_OFF 0
+
+/* configuration on/off keys */
+#define CONFIG_ON_KEY 0x55
+#define CONFIG_OFF_KEY 0xaa
+
+/* configuration space device definitions */
+#define FDC 0
+#define IDE1 1
+#define IDE2 2
+#define PARP 3
+#define SER1 4
+#define SER2 5
+#define RTCL 6
+#define KYBD 7
+#define AUXIO 8
+
+/* Chip register offsets from base */
+#define CONFIG_CONTROL 0x02
+#define INDEX_ADDRESS 0x03
+#define LOGICAL_DEVICE_NUMBER 0x07
+#define DEVICE_ID 0x20
+#define DEVICE_REV 0x21
+#define POWER_CONTROL 0x22
+#define POWER_MGMT 0x23
+#define OSC 0x24
+
+#define ACTIVATE 0x30
+#define ADDR_HI 0x60
+#define ADDR_LO 0x61
+#define INTERRUPT_SEL 0x70
+#define INTERRUPT_SEL_2 0x72 /* KYBD/MOUS only */
+#define DMA_CHANNEL_SEL 0x74 /* FDC/PARP only */
+
+#define FDD_MODE_REGISTER 0x90
+#define FDD_OPTION_REGISTER 0x91
+
+/* values that we read back that are expected ... */
+#define VALID_DEVICE_ID 2
+
+/* default device addresses */
+#define KYBD_INTERRUPT 1
+#define MOUS_INTERRUPT 12
+#define COM2_BASE 0x2f8
+#define COM2_INTERRUPT 3
+#define COM1_BASE 0x3f8
+#define COM1_INTERRUPT 4
+#define PARP_BASE 0x3bc
+#define PARP_INTERRUPT 7
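+
+/*
+ * All chip registers are reached through the usual index/data
+ * pattern: write the register offset to the index port (the base
+ * address), then read or write the data port (base + 1). For
+ * example, selecting the serial 1 logical device looks like:
+ *
+ *	outb(LOGICAL_DEVICE_NUMBER, baseAddr);
+ *	outb(SER1, baseAddr + 1);
+ */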
+
+static unsigned long __init SMCConfigState(unsigned long baseAddr)
+{
+ unsigned char devId;
+ unsigned char devRev;
+
+ unsigned long configPort;
+ unsigned long indexPort;
+ unsigned long dataPort;
+
+ int i;
+
+ configPort = indexPort = baseAddr;
+ dataPort = configPort + 1;
+
+#define NUM_RETRIES 5
+
+ for (i = 0; i < NUM_RETRIES; i++)
+ {
+ outb(CONFIG_ON_KEY, configPort);
+ outb(CONFIG_ON_KEY, configPort);
+ outb(DEVICE_ID, indexPort);
+ devId = inb(dataPort);
+ if (devId == VALID_DEVICE_ID) {
+ outb(DEVICE_REV, indexPort);
+ devRev = inb(dataPort);
+ break;
+ }
+ else
+ udelay(100);
+ }
+ return (i != NUM_RETRIES) ? baseAddr : 0L;
+}
+
+static void __init SMCRunState(unsigned long baseAddr)
+{
+ outb(CONFIG_OFF_KEY, baseAddr);
+}
+
+static unsigned long __init SMCDetectUltraIO(void)
+{
+ unsigned long baseAddr;
+
+ baseAddr = 0x3F0;
+ if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x3F0 ) {
+ return( baseAddr );
+ }
+ baseAddr = 0x370;
+ if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x370 ) {
+ return( baseAddr );
+ }
+ return( ( unsigned long )0 );
+}
+
+static void __init SMCEnableDevice(unsigned long baseAddr,
+ unsigned long device,
+ unsigned long portaddr,
+ unsigned long interrupt)
+{
+ unsigned long indexPort;
+ unsigned long dataPort;
+
+ indexPort = baseAddr;
+ dataPort = baseAddr + 1;
+
+ outb(LOGICAL_DEVICE_NUMBER, indexPort);
+ outb(device, dataPort);
+
+ outb(ADDR_LO, indexPort);
+ outb(( portaddr & 0xFF ), dataPort);
+
+ outb(ADDR_HI, indexPort);
+ outb((portaddr >> 8) & 0xFF, dataPort);
+
+ outb(INTERRUPT_SEL, indexPort);
+ outb(interrupt, dataPort);
+
+ outb(ACTIVATE, indexPort);
+ outb(DEVICE_ON, dataPort);
+}
+
+static void __init SMCEnableKYBD(unsigned long baseAddr)
+{
+ unsigned long indexPort;
+ unsigned long dataPort;
+
+ indexPort = baseAddr;
+ dataPort = baseAddr + 1;
+
+ outb(LOGICAL_DEVICE_NUMBER, indexPort);
+ outb(KYBD, dataPort);
+
+ outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */
+ outb(KYBD_INTERRUPT, dataPort);
+
+ outb(INTERRUPT_SEL_2, indexPort); /* Secondary interrupt select */
+ outb(MOUS_INTERRUPT, dataPort);
+
+ outb(ACTIVATE, indexPort);
+ outb(DEVICE_ON, dataPort);
+}
+
+static void __init SMCEnableFDC(unsigned long baseAddr)
+{
+ unsigned long indexPort;
+ unsigned long dataPort;
+
+ unsigned char oldValue;
+
+ indexPort = baseAddr;
+ dataPort = baseAddr + 1;
+
+ outb(LOGICAL_DEVICE_NUMBER, indexPort);
+ outb(FDC, dataPort);
+
+ outb(FDD_MODE_REGISTER, indexPort);
+ oldValue = inb(dataPort);
+
+ oldValue |= 0x0E; /* Enable burst mode */
+ outb(oldValue, dataPort);
+
+ outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */
+ outb(0x06, dataPort );
+
+ outb(DMA_CHANNEL_SEL, indexPort); /* DMA channel select */
+ outb(0x02, dataPort);
+
+ outb(ACTIVATE, indexPort);
+ outb(DEVICE_ON, dataPort);
+}
+
+#if SMC_DEBUG
+static void __init SMCReportDeviceStatus(unsigned long baseAddr)
+{
+ unsigned long indexPort;
+ unsigned long dataPort;
+ unsigned char currentControl;
+
+ indexPort = baseAddr;
+ dataPort = baseAddr + 1;
+
+ outb(POWER_CONTROL, indexPort);
+ currentControl = inb(dataPort);
+
+ printk(currentControl & (1 << FDC)
+ ? "\t+FDC Enabled\n" : "\t-FDC Disabled\n");
+ printk(currentControl & (1 << IDE1)
+ ? "\t+IDE1 Enabled\n" : "\t-IDE1 Disabled\n");
+ printk(currentControl & (1 << IDE2)
+ ? "\t+IDE2 Enabled\n" : "\t-IDE2 Disabled\n");
+ printk(currentControl & (1 << PARP)
+ ? "\t+PARP Enabled\n" : "\t-PARP Disabled\n");
+ printk(currentControl & (1 << SER1)
+ ? "\t+SER1 Enabled\n" : "\t-SER1 Disabled\n");
+ printk(currentControl & (1 << SER2)
+ ? "\t+SER2 Enabled\n" : "\t-SER2 Disabled\n");
+
+ printk( "\n" );
+}
+#endif
+
+int __init SMC93x_Init(void)
+{
+ unsigned long SMCUltraBase;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if ((SMCUltraBase = SMCDetectUltraIO()) != 0UL) {
+#if SMC_DEBUG
+ SMCReportDeviceStatus(SMCUltraBase);
+#endif
+ SMCEnableDevice(SMCUltraBase, SER1, COM1_BASE, COM1_INTERRUPT);
+ DBG_DEVS(("SMC FDC37C93X: SER1 done\n"));
+ SMCEnableDevice(SMCUltraBase, SER2, COM2_BASE, COM2_INTERRUPT);
+ DBG_DEVS(("SMC FDC37C93X: SER2 done\n"));
+ SMCEnableDevice(SMCUltraBase, PARP, PARP_BASE, PARP_INTERRUPT);
+ DBG_DEVS(("SMC FDC37C93X: PARP done\n"));
+ /* On PC164, IDE on the SMC is not enabled;
+ CMD646 (PCI) on MB */
+ SMCEnableKYBD(SMCUltraBase);
+ DBG_DEVS(("SMC FDC37C93X: KYB done\n"));
+ SMCEnableFDC(SMCUltraBase);
+ DBG_DEVS(("SMC FDC37C93X: FDC done\n"));
+#if SMC_DEBUG
+ SMCReportDeviceStatus(SMCUltraBase);
+#endif
+ SMCRunState(SMCUltraBase);
+ local_irq_restore(flags);
+ printk("SMC FDC37C93X Ultra I/O Controller found @ 0x%lx\n",
+ SMCUltraBase);
+ return 1;
+ }
+ else {
+ local_irq_restore(flags);
+ DBG_DEVS(("No SMC FDC37C93X Ultra I/O Controller found\n"));
+ return 0;
+ }
+}
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
new file mode 100644
index 000000000000..8f1e78551b1e
--- /dev/null
+++ b/arch/alpha/kernel/smp.c
@@ -0,0 +1,1163 @@
+/*
+ * linux/arch/alpha/kernel/smp.c
+ *
+ * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
+ * Renamed the modified smp_call_function to smp_call_function_on_cpu().
+ * Created a function that conforms to the old calling convention
+ * of smp_call_function().
+ *
+ * This is helpful for DCPI.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/bitops.h>
+
+#include <asm/hwrpb.h>
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+#define DEBUG_SMP 0
+#if DEBUG_SMP
+#define DBGS(args) printk args
+#else
+#define DBGS(args)
+#endif
+
+/* A collection of per-processor data. */
+struct cpuinfo_alpha cpu_data[NR_CPUS];
+
+/* A collection of single bit ipi messages. */
+static struct {
+ unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+enum ipi_message_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+};
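+
+/*
+ * Each CPU's ipi_data[].bits word holds one bit per pending message
+ * type. Senders set bits with set_bit() in send_ipi_message() and
+ * raise an interprocessor interrupt; handle_ipi() atomically fetches
+ * and clears the word with xchg() and then services each bit.
+ */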
+
+/* Secondary CPU startup flag: -1 = the secondary must wait, 0 = the
+   secondary may proceed, and the secondary itself sets it to 1 once
+   it is alive. */
+static int smp_secondary_alive __initdata = 0;
+
+/* Which cpu ids came online. */
+cpumask_t cpu_present_mask;
+cpumask_t cpu_online_map;
+
+EXPORT_SYMBOL(cpu_online_map);
+
+/* cpus reported in the hwrpb */
+static unsigned long hwrpb_cpu_present_mask __initdata = 0;
+
+int smp_num_probed; /* Internal processor count */
+int smp_num_cpus = 1; /* Number that came online. */
+
+extern void calibrate_delay(void);
+
+
+
+/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static inline void __init
+smp_store_cpu_info(int cpuid)
+{
+ cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
+ cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
+ cpu_data[cpuid].need_new_asn = 0;
+ cpu_data[cpuid].asn_lock = 0;
+}
+
+/*
+ * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
+ */
+static inline void __init
+smp_setup_percpu_timer(int cpuid)
+{
+ cpu_data[cpuid].prof_counter = 1;
+ cpu_data[cpuid].prof_multiplier = 1;
+}
+
+static void __init
+wait_boot_cpu_to_stop(int cpuid)
+{
+ unsigned long stop = jiffies + 10*HZ;
+
+ while (time_before(jiffies, stop)) {
+ if (!smp_secondary_alive)
+ return;
+ barrier();
+ }
+
+ printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
+ for (;;)
+ barrier();
+}
+
+/*
+ * Where secondaries begin a life of C.
+ */
+void __init
+smp_callin(void)
+{
+ int cpuid = hard_smp_processor_id();
+
+ if (cpu_test_and_set(cpuid, cpu_online_map)) {
+ printk("??, cpu 0x%x already present??\n", cpuid);
+ BUG();
+ }
+
+ /* Turn on machine checks. */
+ wrmces(7);
+
+ /* Set trap vectors. */
+ trap_init();
+
+ /* Set interrupt vector. */
+ wrent(entInt, 0);
+
+ /* Get our local ticker going. */
+ smp_setup_percpu_timer(cpuid);
+
+ /* Call platform-specific callin, if specified */
+ if (alpha_mv.smp_callin) alpha_mv.smp_callin();
+
+ /* All kernel threads share the same mm context. */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+
+ /* Must have completely accurate bogos. */
+ local_irq_enable();
+
+	/* Wait for the boot CPU to stop, with irqs enabled, before
+	   running calibrate_delay. */
+ wait_boot_cpu_to_stop(cpuid);
+ mb();
+ calibrate_delay();
+
+ smp_store_cpu_info(cpuid);
+	/* Allow master to continue only after we've written loops_per_jiffy. */
+ wmb();
+ smp_secondary_alive = 1;
+
+ DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
+ cpuid, current, current->active_mm));
+
+ /* Do nothing. */
+ cpu_idle();
+}
+
+/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
+static int __init
+wait_for_txrdy (unsigned long cpumask)
+{
+ unsigned long timeout;
+
+ if (!(hwrpb->txrdy & cpumask))
+ return 0;
+
+ timeout = jiffies + 10*HZ;
+ while (time_before(jiffies, timeout)) {
+ if (!(hwrpb->txrdy & cpumask))
+ return 0;
+ udelay(10);
+ barrier();
+ }
+
+ return -1;
+}
+
+/*
+ * Send a message to a secondary's console. "START" is one such
+ * interesting message. ;-)
+ */
+static void __init
+send_secondary_console_msg(char *str, int cpuid)
+{
+ struct percpu_struct *cpu;
+ register char *cp1, *cp2;
+ unsigned long cpumask;
+ size_t len;
+
+ cpu = (struct percpu_struct *)
+ ((char*)hwrpb
+ + hwrpb->processor_offset
+ + cpuid * hwrpb->processor_size);
+
+ cpumask = (1UL << cpuid);
+ if (wait_for_txrdy(cpumask))
+ goto timeout;
+
+ cp2 = str;
+ len = strlen(cp2);
+ *(unsigned int *)&cpu->ipc_buffer[0] = len;
+ cp1 = (char *) &cpu->ipc_buffer[1];
+ memcpy(cp1, cp2, len);
+
+ /* atomic test and set */
+ wmb();
+ set_bit(cpuid, &hwrpb->rxrdy);
+
+ if (wait_for_txrdy(cpumask))
+ goto timeout;
+ return;
+
+ timeout:
+ printk("Processor %x not ready\n", cpuid);
+}
+
+/*
+ * A secondary console wants to send a message. Receive it.
+ */
+static void
+recv_secondary_console_msg(void)
+{
+ int mycpu, i, cnt;
+ unsigned long txrdy = hwrpb->txrdy;
+ char *cp1, *cp2, buf[80];
+ struct percpu_struct *cpu;
+
+ DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));
+
+ mycpu = hard_smp_processor_id();
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!(txrdy & (1UL << i)))
+ continue;
+
+ DBGS(("recv_secondary_console_msg: "
+ "TXRDY contains CPU %d.\n", i));
+
+ cpu = (struct percpu_struct *)
+ ((char*)hwrpb
+ + hwrpb->processor_offset
+ + i * hwrpb->processor_size);
+
+ DBGS(("recv_secondary_console_msg: on %d from %d"
+ " HALT_REASON 0x%lx FLAGS 0x%lx\n",
+ mycpu, i, cpu->halt_reason, cpu->flags));
+
+ cnt = cpu->ipc_buffer[0] >> 32;
+ if (cnt <= 0 || cnt >= 80)
+ strcpy(buf, "<<< BOGUS MSG >>>");
+ else {
+ cp1 = (char *) &cpu->ipc_buffer[11];
+ cp2 = buf;
+ strcpy(cp2, cp1);
+
+ while ((cp2 = strchr(cp2, '\r')) != 0) {
+ *cp2 = ' ';
+ if (cp2[1] == '\n')
+ cp2[1] = ' ';
+ }
+ }
+
+ DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
+ "message is '%s'\n", mycpu, buf));
+ }
+
+ hwrpb->txrdy = 0;
+}
+
+/*
+ * Convince the console to have a secondary cpu begin execution.
+ */
+static int __init
+secondary_cpu_start(int cpuid, struct task_struct *idle)
+{
+ struct percpu_struct *cpu;
+ struct pcb_struct *hwpcb, *ipcb;
+ unsigned long timeout;
+
+ cpu = (struct percpu_struct *)
+ ((char*)hwrpb
+ + hwrpb->processor_offset
+ + cpuid * hwrpb->processor_size);
+ hwpcb = (struct pcb_struct *) cpu->hwpcb;
+ ipcb = &idle->thread_info->pcb;
+
+ /* Initialize the CPU's HWPCB to something just good enough for
+ us to get started. Immediately after starting, we'll swpctx
+ to the target idle task's pcb. Reuse the stack in the mean
+ time. Precalculate the target PCBB. */
+ hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
+ hwpcb->usp = 0;
+ hwpcb->ptbr = ipcb->ptbr;
+ hwpcb->pcc = 0;
+ hwpcb->asn = 0;
+ hwpcb->unique = virt_to_phys(ipcb);
+ hwpcb->flags = ipcb->flags;
+ hwpcb->res1 = hwpcb->res2 = 0;
+
+#if 0
+ DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
+ hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
+#endif
+ DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
+ cpuid, idle->state, ipcb->flags));
+
+ /* Setup HWRPB fields that SRM uses to activate secondary CPU */
+ hwrpb->CPU_restart = __smp_callin;
+ hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
+
+ /* Recalculate and update the HWRPB checksum */
+ hwrpb_update_checksum(hwrpb);
+
+ /*
+ * Send a "start" command to the specified processor.
+ */
+
+ /* SRM III 3.4.1.3 */
+ cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */
+ cpu->flags &= ~1; /* turn off Bootstrap In Progress */
+ wmb();
+
+ send_secondary_console_msg("START\r\n", cpuid);
+
+ /* Wait 10 seconds for an ACK from the console. */
+ timeout = jiffies + 10*HZ;
+ while (time_before(jiffies, timeout)) {
+ if (cpu->flags & 1)
+ goto started;
+ udelay(10);
+ barrier();
+ }
+ printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
+ return -1;
+
+ started:
+ DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
+ return 0;
+}
+
+/*
+ * Bring one cpu online.
+ */
+static int __init
+smp_boot_one_cpu(int cpuid)
+{
+ struct task_struct *idle;
+ unsigned long timeout;
+
+ /* Cook up an idler for this guy. Note that the address we
+ give to kernel_thread is irrelevant -- it's going to start
+ where HWRPB.CPU_restart says to start. But this gets all
+ the other task-y sort of data structures set up like we
+ wish. We can't use kernel_thread since we must avoid
+ rescheduling the child. */
+ idle = fork_idle(cpuid);
+ if (IS_ERR(idle))
+ panic("failed fork for CPU %d", cpuid);
+
+ DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
+ cpuid, idle->state, idle->flags));
+
+ /* Signal the secondary to wait a moment. */
+ smp_secondary_alive = -1;
+
+ /* Whirrr, whirrr, whirrrrrrrrr... */
+ if (secondary_cpu_start(cpuid, idle))
+ return -1;
+
+ /* Notify the secondary CPU it can run calibrate_delay. */
+ mb();
+ smp_secondary_alive = 0;
+
+ /* We've been acked by the console; wait one second for
+ the task to start up for real. */
+ timeout = jiffies + 1*HZ;
+ while (time_before(jiffies, timeout)) {
+ if (smp_secondary_alive == 1)
+ goto alive;
+ udelay(10);
+ barrier();
+ }
+
+ /* We failed to boot the CPU. */
+
+ printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
+ return -1;
+
+ alive:
+ /* Another "Red Snapper". */
+ return 0;
+}
+
+/*
+ * Called from setup_arch. Detect an SMP system and which processors
+ * are present.
+ */
+void __init
+setup_smp(void)
+{
+ struct percpu_struct *cpubase, *cpu;
+ unsigned long i;
+
+ if (boot_cpuid != 0) {
+ printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
+ boot_cpuid);
+ }
+
+ if (hwrpb->nr_processors > 1) {
+ int boot_cpu_palrev;
+
+ DBGS(("setup_smp: nr_processors %ld\n",
+ hwrpb->nr_processors));
+
+ cpubase = (struct percpu_struct *)
+ ((char*)hwrpb + hwrpb->processor_offset);
+ boot_cpu_palrev = cpubase->pal_revision;
+
+ for (i = 0; i < hwrpb->nr_processors; i++) {
+ cpu = (struct percpu_struct *)
+ ((char *)cpubase + i*hwrpb->processor_size);
+ if ((cpu->flags & 0x1cc) == 0x1cc) {
+ smp_num_probed++;
+ /* Assume here that "whami" == index */
+ hwrpb_cpu_present_mask |= (1UL << i);
+ cpu->pal_revision = boot_cpu_palrev;
+ }
+
+ DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
+ i, cpu->flags, cpu->type));
+ DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
+ i, cpu->pal_revision));
+ }
+ } else {
+ smp_num_probed = 1;
+ hwrpb_cpu_present_mask = (1UL << boot_cpuid);
+ }
+ cpu_present_mask = cpumask_of_cpu(boot_cpuid);
+
+ printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
+ smp_num_probed, hwrpb_cpu_present_mask);
+}
+
+/*
+ * Called by smp_init to prepare the secondaries
+ */
+void __init
+smp_prepare_cpus(unsigned int max_cpus)
+{
+ int cpu_count, i;
+
+ /* Take care of some initial bookkeeping. */
+ memset(ipi_data, 0, sizeof(ipi_data));
+
+ current_thread_info()->cpu = boot_cpuid;
+
+ smp_store_cpu_info(boot_cpuid);
+ smp_setup_percpu_timer(boot_cpuid);
+
+ /* Nothing to do on a UP box, or when told not to. */
+ if (smp_num_probed == 1 || max_cpus == 0) {
+ cpu_present_mask = cpumask_of_cpu(boot_cpuid);
+ printk(KERN_INFO "SMP mode deactivated.\n");
+ return;
+ }
+
+ printk(KERN_INFO "SMP starting up secondaries.\n");
+
+ cpu_count = 1;
+ for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) {
+ if (i == boot_cpuid)
+ continue;
+
+ if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
+ continue;
+
+ cpu_set(i, cpu_possible_map);
+ cpu_count++;
+ }
+
+ smp_num_cpus = cpu_count;
+}
+
+void __devinit
+smp_prepare_boot_cpu(void)
+{
+ /*
+ * Mark the boot cpu (current cpu) as both present and online
+ */
+ cpu_set(smp_processor_id(), cpu_present_mask);
+ cpu_set(smp_processor_id(), cpu_online_map);
+}
+
+int __devinit
+__cpu_up(unsigned int cpu)
+{
+ smp_boot_one_cpu(cpu);
+
+ return cpu_online(cpu) ? 0 : -ENOSYS;
+}
+
+void __init
+smp_cpus_done(unsigned int max_cpus)
+{
+ int cpu;
+ unsigned long bogosum = 0;
+
+ for(cpu = 0; cpu < NR_CPUS; cpu++)
+ if (cpu_online(cpu))
+ bogosum += cpu_data[cpu].loops_per_jiffy;
+
+ printk(KERN_INFO "SMP: Total of %d processors activated "
+ "(%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(),
+ (bogosum + 2500) / (500000/HZ),
+ ((bogosum + 2500) / (5000/HZ)) % 100);
+}
+
+
+void
+smp_percpu_timer_interrupt(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ unsigned long user = user_mode(regs);
+ struct cpuinfo_alpha *data = &cpu_data[cpu];
+
+ /* Record kernel PC. */
+ profile_tick(CPU_PROFILING, regs);
+
+ if (!--data->prof_counter) {
+ /* We need to make like a normal interrupt -- otherwise
+ timer interrupts ignore the global interrupt lock,
+ which would be a Bad Thing. */
+ irq_enter();
+
+ update_process_times(user);
+
+ data->prof_counter = data->prof_multiplier;
+
+ irq_exit();
+ }
+}
+
+int __init
+setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+
+static void
+send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
+{
+ int i;
+
+ mb();
+ for_each_cpu_mask(i, to_whom)
+ set_bit(operation, &ipi_data[i].bits);
+
+ mb();
+ for_each_cpu_mask(i, to_whom)
+ wripir(i);
+}
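+
+/*
+ * Note on the barriers above: the first mb() orders the caller's data
+ * stores before the message bits become visible to other CPUs, and
+ * the second orders the bit stores before the interprocessor
+ * interrupt is raised via wripir().
+ */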
+
+/* Structure and data for smp_call_function. This is designed to
+ minimize static memory requirements. Plus it looks cleaner. */
+
+struct smp_call_struct {
+ void (*func) (void *info);
+ void *info;
+ long wait;
+ atomic_t unstarted_count;
+ atomic_t unfinished_count;
+};
+
+static struct smp_call_struct *smp_call_function_data;
+
+/* Atomically drop data into a shared pointer.  The pointer is free if
+   it is initially NULL.  If retry, spin until free. */
+
+static int
+pointer_lock (void *lock, void *data, int retry)
+{
+ void *old, *tmp;
+
+ mb();
+ again:
+ /* Compare and swap with zero. */
+ asm volatile (
+ "1: ldq_l %0,%1\n"
+ " mov %3,%2\n"
+ " bne %0,2f\n"
+ " stq_c %2,%1\n"
+ " beq %2,1b\n"
+ "2:"
+ : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
+ : "r"(data)
+ : "memory");
+
+ if (old == 0)
+ return 0;
+ if (! retry)
+ return -EBUSY;
+
+ while (*(void **)lock)
+ barrier();
+ goto again;
+}
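+
+/*
+ * In C terms, the ldq_l/stq_c sequence above is roughly the atomic
+ * equivalent of:
+ *
+ *	if (*(void **)lock == NULL) {
+ *		*(void **)lock = data;
+ *		return 0;
+ *	}
+ *
+ * with the load-locked/store-conditional pair retrying the store if
+ * another CPU writes the lock word between the load and the store.
+ */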
+
+void
+handle_ipi(struct pt_regs *regs)
+{
+ int this_cpu = smp_processor_id();
+ unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
+ unsigned long ops;
+
+#if 0
+ DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
+ this_cpu, *pending_ipis, regs->pc));
+#endif
+
+ mb(); /* Order interrupt and bit testing. */
+ while ((ops = xchg(pending_ipis, 0)) != 0) {
+ mb(); /* Order bit clearing and data access. */
+ do {
+ unsigned long which;
+
+ which = ops & -ops;
+ ops &= ~which;
+ which = __ffs(which);
+
+ switch (which) {
+ case IPI_RESCHEDULE:
+ /* Reschedule callback. Everything to be done
+ is done by the interrupt return path. */
+ break;
+
+ case IPI_CALL_FUNC:
+ {
+ struct smp_call_struct *data;
+ void (*func)(void *info);
+ void *info;
+ int wait;
+
+ data = smp_call_function_data;
+ func = data->func;
+ info = data->info;
+ wait = data->wait;
+
+ /* Notify the sending CPU that the data has been
+ received, and execution is about to begin. */
+ mb();
+ atomic_dec (&data->unstarted_count);
+
+ /* At this point the structure may be gone unless
+ wait is true. */
+ (*func)(info);
+
+ /* Notify the sending CPU that the task is done. */
+ mb();
+ if (wait) atomic_dec (&data->unfinished_count);
+ break;
+ }
+
+ case IPI_CPU_STOP:
+ halt();
+
+ default:
+ printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
+ this_cpu, which);
+ break;
+ }
+ } while (ops);
+
+ mb(); /* Order data access and bit testing. */
+ }
+
+ cpu_data[this_cpu].ipi_count++;
+
+ if (hwrpb->txrdy)
+ recv_secondary_console_msg();
+}
+
+void
+smp_send_reschedule(int cpu)
+{
+#ifdef DEBUG_IPI_MSG
+ if (cpu == hard_smp_processor_id())
+ printk(KERN_WARNING
+ "smp_send_reschedule: Sending IPI to self.\n");
+#endif
+ send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+}
+
+void
+smp_send_stop(void)
+{
+ cpumask_t to_whom = cpu_possible_map;
+ cpu_clear(smp_processor_id(), to_whom);
+#ifdef DEBUG_IPI_MSG
+ if (hard_smp_processor_id() != boot_cpu_id)
+ printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
+#endif
+ send_ipi_message(to_whom, IPI_CPU_STOP);
+}
+
+/*
+ * Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <retry> If true, keep retrying until ready.
+ * <wait> If true, wait until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code.
+ *
+ * Does not return until remote CPUs are nearly ready to execute <func>,
+ * are executing it, or have already executed it.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+
+int
+smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
+ int wait, cpumask_t to_whom)
+{
+ struct smp_call_struct data;
+ unsigned long timeout;
+ int num_cpus_to_call;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ data.func = func;
+ data.info = info;
+ data.wait = wait;
+
+ cpu_clear(smp_processor_id(), to_whom);
+ num_cpus_to_call = cpus_weight(to_whom);
+
+ atomic_set(&data.unstarted_count, num_cpus_to_call);
+ atomic_set(&data.unfinished_count, num_cpus_to_call);
+
+ /* Acquire the smp_call_function_data mutex. */
+ if (pointer_lock(&smp_call_function_data, &data, retry))
+ return -EBUSY;
+
+ /* Send a message to the requested CPUs. */
+ send_ipi_message(to_whom, IPI_CALL_FUNC);
+
+ /* Wait for a minimal response. */
+ timeout = jiffies + HZ;
+ while (atomic_read (&data.unstarted_count) > 0
+ && time_before (jiffies, timeout))
+ barrier();
+
+ /* If there's no response yet, log a message but allow a longer
+ * timeout period -- if we get a response this time, log
+ * a message saying when we got it..
+ */
+ if (atomic_read(&data.unstarted_count) > 0) {
+ long start_time = jiffies;
+ printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
+ __FUNCTION__);
+ timeout = jiffies + 30 * HZ;
+ while (atomic_read(&data.unstarted_count) > 0
+ && time_before(jiffies, timeout))
+ barrier();
+ if (atomic_read(&data.unstarted_count) <= 0) {
+ long delta = jiffies - start_time;
+ printk(KERN_ERR
+ "%s: response %ld.%ld seconds into long wait\n",
+ __FUNCTION__, delta / HZ,
+ (100 * (delta - ((delta / HZ) * HZ))) / HZ);
+ }
+ }
+
+ /* We either got one or timed out -- clear the lock. */
+ mb();
+ smp_call_function_data = NULL;
+
+ /*
+ * If after both the initial and long timeout periods we still don't
+ * have a response, something is very wrong...
+ */
+ BUG_ON(atomic_read (&data.unstarted_count) > 0);
+
+ /* Wait for a complete response, if needed. */
+ if (wait) {
+ while (atomic_read (&data.unfinished_count) > 0)
+ barrier();
+ }
+
+ return 0;
+}
+
+int
+smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+{
+ return smp_call_function_on_cpu (func, info, retry, wait,
+ cpu_online_map);
+}
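+
+/*
+ * Usage sketch (illustrative, not part of the original file): counting
+ * how many other CPUs ran a handler.  The handler and counter names are
+ * hypothetical; only the interfaces defined above are assumed.
+ *
+ *	static atomic_t example_hits = ATOMIC_INIT(0);
+ *
+ *	static void example_handler(void *info)
+ *	{
+ *		atomic_inc((atomic_t *) info);
+ *	}
+ *
+ *	// From process context, with interrupts enabled:
+ *	if (smp_call_function(example_handler, &example_hits, 1, 1))
+ *		printk(KERN_ERR "example_handler timed out\n");
+ *	// With wait == 1, every other online CPU has now run the
+ *	// handler, so example_hits == num_online_cpus() - 1.
+ */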
+
+static void
+ipi_imb(void *ignored)
+{
+ imb();
+}
+
+void
+smp_imb(void)
+{
+ /* We must wait for other processors to flush their icache before we continue. */
+ if (on_each_cpu(ipi_imb, NULL, 1, 1))
+ printk(KERN_CRIT "smp_imb: timed out\n");
+}
+
+static void
+ipi_flush_tlb_all(void *ignored)
+{
+ tbia();
+}
+
+void
+flush_tlb_all(void)
+{
+ /* Although we don't have any data to pass, we do want to
+ synchronize with the other processors. */
+ if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
+ printk(KERN_CRIT "flush_tlb_all: timed out\n");
+ }
+}
+
+#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
+
+static void
+ipi_flush_tlb_mm(void *x)
+{
+ struct mm_struct *mm = (struct mm_struct *) x;
+ if (mm == current->active_mm && !asn_locked())
+ flush_tlb_current(mm);
+ else
+ flush_tlb_other(mm);
+}
+
+void
+flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+
+ if (mm == current->active_mm) {
+ flush_tlb_current(mm);
+ if (atomic_read(&mm->mm_users) <= 1) {
+ int cpu, this_cpu = smp_processor_id();
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ if (!cpu_online(cpu) || cpu == this_cpu)
+ continue;
+ if (mm->context[cpu])
+ mm->context[cpu] = 0;
+ }
+ preempt_enable();
+ return;
+ }
+ }
+
+ if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+ printk(KERN_CRIT "flush_tlb_mm: timed out\n");
+ }
+
+ preempt_enable();
+}
+
+struct flush_tlb_page_struct {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long addr;
+};
+
+static void
+ipi_flush_tlb_page(void *x)
+{
+ struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
+ struct mm_struct * mm = data->mm;
+
+ if (mm == current->active_mm && !asn_locked())
+ flush_tlb_current_page(mm, data->vma, data->addr);
+ else
+ flush_tlb_other(mm);
+}
+
+void
+flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct flush_tlb_page_struct data;
+ struct mm_struct *mm = vma->vm_mm;
+
+ preempt_disable();
+
+ if (mm == current->active_mm) {
+ flush_tlb_current_page(mm, vma, addr);
+ if (atomic_read(&mm->mm_users) <= 1) {
+ int cpu, this_cpu = smp_processor_id();
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ if (!cpu_online(cpu) || cpu == this_cpu)
+ continue;
+ if (mm->context[cpu])
+ mm->context[cpu] = 0;
+ }
+ preempt_enable();
+ return;
+ }
+ }
+
+ data.vma = vma;
+ data.mm = mm;
+ data.addr = addr;
+
+ if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+ printk(KERN_CRIT "flush_tlb_page: timed out\n");
+ }
+
+ preempt_enable();
+}
+
+void
+flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ /* On the Alpha we always flush the whole user tlb. */
+ flush_tlb_mm(vma->vm_mm);
+}
+
+static void
+ipi_flush_icache_page(void *x)
+{
+ struct mm_struct *mm = (struct mm_struct *) x;
+ if (mm == current->active_mm && !asn_locked())
+ __load_new_mm_context(mm);
+ else
+ flush_tlb_other(mm);
+}
+
+void
+flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ return;
+
+ preempt_disable();
+
+ if (mm == current->active_mm) {
+ __load_new_mm_context(mm);
+ if (atomic_read(&mm->mm_users) <= 1) {
+ int cpu, this_cpu = smp_processor_id();
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ if (!cpu_online(cpu) || cpu == this_cpu)
+ continue;
+ if (mm->context[cpu])
+ mm->context[cpu] = 0;
+ }
+ preempt_enable();
+ return;
+ }
+ }
+
+ if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+ printk(KERN_CRIT "flush_icache_page: timed out\n");
+ }
+
+ preempt_enable();
+}
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+void
+_raw_spin_unlock(spinlock_t * lock)
+{
+ mb();
+ lock->lock = 0;
+
+ lock->on_cpu = -1;
+ lock->previous = NULL;
+ lock->task = NULL;
+ lock->base_file = "none";
+ lock->line_no = 0;
+}
+
+void
+debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
+{
+ long tmp;
+ long stuck;
+ void *inline_pc = __builtin_return_address(0);
+ unsigned long started = jiffies;
+ int printed = 0;
+ int cpu = smp_processor_id();
+
+ stuck = 1L << 30;
+ try_again:
+
+ /* Use sub-sections to put the actual loop at the end
+ of this object file's text section so as to improve
+ branch prediction. */
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1\n"
+ " subq %2,1,%2\n"
+ " blbs %0,2f\n"
+ " or %0,1,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ "4: mb\n"
+ ".subsection 2\n"
+ "2: ldl %0,%1\n"
+ " subq %2,1,%2\n"
+ "3: blt %2,4b\n"
+ " blbs %0,2b\n"
+ " br 1b\n"
+ ".previous"
+ : "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
+ : "1" (lock->lock), "2" (stuck) : "memory");
+
+ if (stuck < 0) {
+ printk(KERN_WARNING
+ "%s:%d spinlock stuck in %s at %p(%d)"
+ " owner %s at %p(%d) %s:%d\n",
+ base_file, line_no,
+ current->comm, inline_pc, cpu,
+ lock->task->comm, lock->previous,
+ lock->on_cpu, lock->base_file, lock->line_no);
+ stuck = 1L << 36;
+ printed = 1;
+ goto try_again;
+ }
+
+ /* Exiting. Got the lock. */
+ lock->on_cpu = cpu;
+ lock->previous = inline_pc;
+ lock->task = current;
+ lock->base_file = base_file;
+ lock->line_no = line_no;
+
+ if (printed) {
+ printk(KERN_WARNING
+ "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
+ base_file, line_no, current->comm, inline_pc,
+ cpu, jiffies - started);
+ }
+}
+
+int
+debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
+{
+ int ret;
+ if ((ret = !test_and_set_bit(0, lock))) {
+ lock->on_cpu = smp_processor_id();
+ lock->previous = __builtin_return_address(0);
+ lock->task = current;
+ } else {
+ lock->base_file = base_file;
+ lock->line_no = line_no;
+ }
+ return ret;
+}
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+#ifdef CONFIG_DEBUG_RWLOCK
+void _raw_write_lock(rwlock_t * lock)
+{
+ long regx, regy;
+ int stuck_lock, stuck_reader;
+ void *inline_pc = __builtin_return_address(0);
+
+ try_again:
+
+ stuck_lock = 1<<30;
+ stuck_reader = 1<<30;
+
+ __asm__ __volatile__(
+ "1: ldl_l %1,%0\n"
+ " blbs %1,6f\n"
+ " blt %1,8f\n"
+ " mov 1,%1\n"
+ " stl_c %1,%0\n"
+ " beq %1,6f\n"
+ "4: mb\n"
+ ".subsection 2\n"
+ "6: blt %3,4b # debug\n"
+ " subl %3,1,%3 # debug\n"
+ " ldl %1,%0\n"
+ " blbs %1,6b\n"
+ "8: blt %4,4b # debug\n"
+ " subl %4,1,%4 # debug\n"
+ " ldl %1,%0\n"
+ " blt %1,8b\n"
+ " br 1b\n"
+ ".previous"
+ : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
+ "=&r" (stuck_lock), "=&r" (stuck_reader)
+ : "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
+
+ if (stuck_lock < 0) {
+ printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
+ goto try_again;
+ }
+ if (stuck_reader < 0) {
+ printk(KERN_WARNING "write_lock stuck on readers at %p\n",
+ inline_pc);
+ goto try_again;
+ }
+}
+
+void _raw_read_lock(rwlock_t * lock)
+{
+ long regx;
+ int stuck_lock;
+ void *inline_pc = __builtin_return_address(0);
+
+ try_again:
+
+ stuck_lock = 1<<30;
+
+ __asm__ __volatile__(
+ "1: ldl_l %1,%0;"
+ " blbs %1,6f;"
+ " subl %1,2,%1;"
+ " stl_c %1,%0;"
+ " beq %1,6f;"
+ "4: mb\n"
+ ".subsection 2\n"
+ "6: ldl %1,%0;"
+ " blt %2,4b # debug\n"
+ " subl %2,1,%2 # debug\n"
+ " blbs %1,6b;"
+ " br 1b\n"
+ ".previous"
+ : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
+ : "0" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
+
+ if (stuck_lock < 0) {
+ printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
+ goto try_again;
+ }
+}
+#endif /* CONFIG_DEBUG_RWLOCK */
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
new file mode 100644
index 000000000000..5c98fc83e238
--- /dev/null
+++ b/arch/alpha/kernel/srm_env.c
@@ -0,0 +1,335 @@
+/*
+ * srm_env.c - Access to SRM environment
+ * variables through linux' procfs
+ *
+ * Copyright (C) 2001-2002 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+ *
+ * This driver is, after all, a modified version of Erik Mouw's
+ * Documentation/DocBook/procfs_example.c, so: thank
+ * you, Erik! He can be reached via email at
+ * <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
+ * provided by DEC^WCompaq^WIntel's "Jumpstart" CD, which
+ * included a patch like this as well. Thanks for the idea!
+ *
+ * This program is free software; you can redistribute
+ * it and/or modify it under the terms of the GNU General
+ * Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+ * Changelog
+ * ~~~~~~~~~
+ *
+ * Thu, 22 Aug 2002 15:10:43 +0200
+ * - Update Config.help entry. I got a number of emails asking
+ * me to tell their senders if they could make use of this
+ * piece of code... So: "SRM is something like BIOS for your
+ * Alpha"
+ *   - Update code formatting a bit to better conform to the
+ *     CodingStyle rules.
+ * - So this is v0.0.5, with no changes (except formatting)
+ *
+ * Wed, 22 May 2002 00:11:21 +0200
+ * - Fix typo on comment (SRC -> SRM)
+ * - Call this "Version 0.0.4"
+ *
+ * Tue, 9 Apr 2002 18:44:40 +0200
+ * - Implement access by variable name and additionally
+ * by number. This is done by creating two subdirectories
+ * where one holds all names (like the old directory
+ * did) and the other holding 256 files named like "0",
+ * "1" and so on.
+ * - Call this "Version 0.0.3"
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <asm/console.h>
+#include <asm/uaccess.h>
+#include <asm/machvec.h>
+
+#define BASE_DIR "srm_environment" /* Subdir in /proc/ */
+#define NAMED_DIR "named_variables" /* Subdir for known variables */
+#define NUMBERED_DIR "numbered_variables" /* Subdir for all variables */
+#define VERSION "0.0.5" /* Module version */
+#define NAME "srm_env" /* Module name */
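+
+/*
+ * Resulting /proc layout (a sketch derived from the defines above):
+ *
+ *	/proc/srm_environment/named_variables/boot_osflags
+ *	/proc/srm_environment/numbered_variables/7
+ *
+ * e.g. "cat /proc/srm_environment/named_variables/boot_osflags" reads
+ * a variable, and writing the same file updates it via SRM callbacks.
+ */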
+
+MODULE_AUTHOR("Jan-Benedict Glaw <jbglaw@lug-owl.de>");
+MODULE_DESCRIPTION("Accessing Alpha SRM environment through procfs interface");
+MODULE_LICENSE("GPL");
+
+typedef struct _srm_env {
+ char *name;
+ unsigned long id;
+ struct proc_dir_entry *proc_entry;
+} srm_env_t;
+
+static struct proc_dir_entry *base_dir;
+static struct proc_dir_entry *named_dir;
+static struct proc_dir_entry *numbered_dir;
+static char number[256][4];
+
+static srm_env_t srm_named_entries[] = {
+ { "auto_action", ENV_AUTO_ACTION },
+ { "boot_dev", ENV_BOOT_DEV },
+ { "bootdef_dev", ENV_BOOTDEF_DEV },
+ { "booted_dev", ENV_BOOTED_DEV },
+ { "boot_file", ENV_BOOT_FILE },
+ { "booted_file", ENV_BOOTED_FILE },
+ { "boot_osflags", ENV_BOOT_OSFLAGS },
+ { "booted_osflags", ENV_BOOTED_OSFLAGS },
+ { "boot_reset", ENV_BOOT_RESET },
+ { "dump_dev", ENV_DUMP_DEV },
+ { "enable_audit", ENV_ENABLE_AUDIT },
+ { "license", ENV_LICENSE },
+ { "char_set", ENV_CHAR_SET },
+ { "language", ENV_LANGUAGE },
+ { "tty_dev", ENV_TTY_DEV },
+ { NULL, 0 },
+};
+static srm_env_t srm_numbered_entries[256];
+
+
+
+static int
+srm_env_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ int nbytes;
+ unsigned long ret;
+ srm_env_t *entry;
+
+ if(off != 0)
+ return -EFAULT;
+
+ entry = (srm_env_t *) data;
+ ret = callback_getenv(entry->id, page, count);
+
+ if((ret >> 61) == 0)
+ nbytes = (int) ret;
+ else
+ nbytes = -EFAULT;
+
+ return nbytes;
+}
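+
+/*
+ * Worked example for the status test above: the SRM callbacks return a
+ * 64-bit value whose top three bits (63..61) are a status code and
+ * whose low bits are the byte count (see the srmcons_result layout in
+ * srmcons.c).  A getenv that stored 9 bytes returns ret == 9, so
+ * (ret >> 61) == 0 and we report 9 bytes; any nonzero status makes
+ * (ret >> 61) != 0 and we fail with -EFAULT.
+ */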
+
+
+static int
+srm_env_write(struct file *file, const char __user *buffer, unsigned long count,
+ void *data)
+{
+ int res;
+ srm_env_t *entry;
+ char *buf = (char *) __get_free_page(GFP_USER);
+ unsigned long ret1, ret2;
+
+ entry = (srm_env_t *) data;
+
+ if (!buf)
+ return -ENOMEM;
+
+ res = -EINVAL;
+ if (count >= PAGE_SIZE)
+ goto out;
+
+ res = -EFAULT;
+ if (copy_from_user(buf, buffer, count))
+ goto out;
+ buf[count] = '\0';
+
+ ret1 = callback_setenv(entry->id, buf, count);
+ if ((ret1 >> 61) == 0) {
+ do
+ ret2 = callback_save_env();
+ while((ret2 >> 61) == 1);
+ res = (int) ret1;
+ }
+
+ out:
+ free_page((unsigned long)buf);
+ return res;
+}
+
+static void
+srm_env_cleanup(void)
+{
+ srm_env_t *entry;
+ unsigned long var_num;
+
+ if(base_dir) {
+ /*
+ * Remove named entries
+ */
+ if(named_dir) {
+ entry = srm_named_entries;
+ while(entry->name != NULL && entry->id != 0) {
+ if(entry->proc_entry) {
+ remove_proc_entry(entry->name,
+ named_dir);
+ entry->proc_entry = NULL;
+ }
+ entry++;
+ }
+ remove_proc_entry(NAMED_DIR, base_dir);
+ }
+
+ /*
+ * Remove numbered entries
+ */
+ if(numbered_dir) {
+ for(var_num = 0; var_num <= 255; var_num++) {
+ entry = &srm_numbered_entries[var_num];
+
+ if(entry->proc_entry) {
+ remove_proc_entry(entry->name,
+ numbered_dir);
+ entry->proc_entry = NULL;
+ entry->name = NULL;
+ }
+ }
+ remove_proc_entry(NUMBERED_DIR, base_dir);
+ }
+
+ remove_proc_entry(BASE_DIR, NULL);
+ }
+
+ return;
+}
+
+
+static int __init
+srm_env_init(void)
+{
+ srm_env_t *entry;
+ unsigned long var_num;
+
+ /*
+ * Check system
+ */
+ if(!alpha_using_srm) {
+ printk(KERN_INFO "%s: This Alpha system doesn't "
+ "know about SRM (or you've booted "
+ "SRM->MILO->Linux, which gets "
+ "misdetected)...\n", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ /*
+ * Init numbers
+ */
+ for(var_num = 0; var_num <= 255; var_num++)
+ sprintf(number[var_num], "%lu", var_num);
+
+ /*
+ * Create base directory
+ */
+ base_dir = proc_mkdir(BASE_DIR, NULL);
+ if(base_dir == NULL) {
+ printk(KERN_ERR "Couldn't create base dir /proc/%s\n",
+ BASE_DIR);
+ goto cleanup;
+ }
+ base_dir->owner = THIS_MODULE;
+
+ /*
+ * Create per-name subdirectory
+ */
+ named_dir = proc_mkdir(NAMED_DIR, base_dir);
+ if(named_dir == NULL) {
+ printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n",
+ BASE_DIR, NAMED_DIR);
+ goto cleanup;
+ }
+ named_dir->owner = THIS_MODULE;
+
+ /*
+ * Create per-number subdirectory
+ */
+ numbered_dir = proc_mkdir(NUMBERED_DIR, base_dir);
+ if(numbered_dir == NULL) {
+ printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n",
+ BASE_DIR, NUMBERED_DIR);
+ goto cleanup;
+
+ }
+ numbered_dir->owner = THIS_MODULE;
+
+ /*
+ * Create all named nodes
+ */
+ entry = srm_named_entries;
+ while(entry->name != NULL && entry->id != 0) {
+ entry->proc_entry = create_proc_entry(entry->name,
+ 0644, named_dir);
+ if(entry->proc_entry == NULL)
+ goto cleanup;
+
+ entry->proc_entry->data = (void *) entry;
+ entry->proc_entry->owner = THIS_MODULE;
+ entry->proc_entry->read_proc = srm_env_read;
+ entry->proc_entry->write_proc = srm_env_write;
+
+ entry++;
+ }
+
+ /*
+ * Create all numbered nodes
+ */
+ for(var_num = 0; var_num <= 255; var_num++) {
+ entry = &srm_numbered_entries[var_num];
+ entry->name = number[var_num];
+
+ entry->proc_entry = create_proc_entry(entry->name,
+ 0644, numbered_dir);
+ if(entry->proc_entry == NULL)
+ goto cleanup;
+
+ entry->id = var_num;
+ entry->proc_entry->data = (void *) entry;
+ entry->proc_entry->owner = THIS_MODULE;
+ entry->proc_entry->read_proc = srm_env_read;
+ entry->proc_entry->write_proc = srm_env_write;
+ }
+
+ printk(KERN_INFO "%s: version %s loaded successfully\n", NAME,
+ VERSION);
+
+ return 0;
+
+cleanup:
+ srm_env_cleanup();
+
+ return -ENOMEM;
+}
+
+
+static void __exit
+srm_env_exit(void)
+{
+ srm_env_cleanup();
+ printk(KERN_INFO "%s: unloaded successfully\n", NAME);
+
+ return;
+}
+
+
+module_init(srm_env_init);
+module_exit(srm_env_exit);
+
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
new file mode 100644
index 000000000000..3b30d4f1fc42
--- /dev/null
+++ b/arch/alpha/kernel/srmcons.c
@@ -0,0 +1,326 @@
+/*
+ * linux/arch/alpha/kernel/srmcons.c
+ *
+ * Callback-based driver for the SRM console device.
+ * (TTY driver and console driver)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <asm/console.h>
+#include <asm/uaccess.h>
+
+
+static DEFINE_SPINLOCK(srmcons_callback_lock);
+static int srm_is_registered_console = 0;
+
+/*
+ * The TTY driver
+ */
+#define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */
+
+struct srmcons_private {
+ struct tty_struct *tty;
+ struct timer_list timer;
+ spinlock_t lock;
+};
+
+typedef union _srmcons_result {
+ struct {
+ unsigned long c :61;
+ unsigned long status :3;
+ } bits;
+ long as_long;
+} srmcons_result;
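+
+/*
+ * Decoding sketch for the union above: bits 60..0 of a callback result
+ * hold the character (or count), bits 63..61 the status.  E.g. a raw
+ * value of 0x2000000000000041 has bits.status == 1 and bits.c == 0x41
+ * ('A').  That status bit 0 means "more input pending" is an
+ * assumption drawn from the loop condition below, which keeps reading
+ * while (status & 1) and treats status < 2 as a valid character.
+ */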
+
+/* called with callback_lock held */
+static int
+srmcons_do_receive_chars(struct tty_struct *tty)
+{
+ srmcons_result result;
+ int count = 0, loops = 0;
+
+ do {
+ result.as_long = callback_getc(0);
+ if (result.bits.status < 2) {
+ tty_insert_flip_char(tty, (char)result.bits.c, 0);
+ count++;
+ }
+ } while((result.bits.status & 1) && (++loops < 10));
+
+ if (count)
+ tty_schedule_flip(tty);
+
+ return count;
+}
+
+static void
+srmcons_receive_chars(unsigned long data)
+{
+ struct srmcons_private *srmconsp = (struct srmcons_private *)data;
+ unsigned long flags;
+ int incr = 10;
+
+ local_irq_save(flags);
+ if (spin_trylock(&srmcons_callback_lock)) {
+ if (!srmcons_do_receive_chars(srmconsp->tty))
+ incr = 100;
+ spin_unlock(&srmcons_callback_lock);
+ }
+
+ spin_lock(&srmconsp->lock);
+ if (srmconsp->tty) {
+ srmconsp->timer.expires = jiffies + incr;
+ add_timer(&srmconsp->timer);
+ }
+ spin_unlock(&srmconsp->lock);
+
+ local_irq_restore(flags);
+}
+
+/* called with callback_lock held */
+static int
+srmcons_do_write(struct tty_struct *tty, const char *buf, int count)
+{
+ static char str_cr[1] = "\r";
+ long c, remaining = count;
+ srmcons_result result;
+ char *cur;
+ int need_cr;
+
+ for (cur = (char *)buf; remaining > 0; ) {
+ need_cr = 0;
+ /*
+ * Break it up into reasonable size chunks to allow a chance
+ * for input to get in
+ */
+ for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++)
+ if (cur[c] == '\n')
+ need_cr = 1;
+
+ while (c > 0) {
+ result.as_long = callback_puts(0, cur, c);
+ c -= result.bits.c;
+ remaining -= result.bits.c;
+ cur += result.bits.c;
+
+ /*
+ * Check for pending input iff a tty was provided
+ */
+ if (tty)
+ srmcons_do_receive_chars(tty);
+ }
+
+ while (need_cr) {
+ result.as_long = callback_puts(0, str_cr, 1);
+ if (result.bits.c > 0)
+ need_cr = 0;
+ }
+ }
+ return count;
+}
+
+static int
+srmcons_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&srmcons_callback_lock, flags);
+ srmcons_do_write(tty, (const char *) buf, count);
+ spin_unlock_irqrestore(&srmcons_callback_lock, flags);
+
+ return count;
+}
+
+static int
+srmcons_write_room(struct tty_struct *tty)
+{
+ return 512;
+}
+
+static int
+srmcons_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0;
+}
+
+static int
+srmcons_get_private_struct(struct srmcons_private **ps)
+{
+ static struct srmcons_private *srmconsp = NULL;
+ static DEFINE_SPINLOCK(srmconsp_lock);
+ unsigned long flags;
+ int retval = 0;
+
+ if (srmconsp == NULL) {
+ spin_lock_irqsave(&srmconsp_lock, flags);
+
+ srmconsp = kmalloc(sizeof(*srmconsp), GFP_KERNEL);
+ if (srmconsp == NULL)
+ retval = -ENOMEM;
+ else {
+ srmconsp->tty = NULL;
+ spin_lock_init(&srmconsp->lock);
+ init_timer(&srmconsp->timer);
+ }
+
+ spin_unlock_irqrestore(&srmconsp_lock, flags);
+ }
+
+ *ps = srmconsp;
+ return retval;
+}
+
+static int
+srmcons_open(struct tty_struct *tty, struct file *filp)
+{
+ struct srmcons_private *srmconsp;
+ unsigned long flags;
+ int retval;
+
+ retval = srmcons_get_private_struct(&srmconsp);
+ if (retval)
+ return retval;
+
+ spin_lock_irqsave(&srmconsp->lock, flags);
+
+ if (!srmconsp->tty) {
+ tty->driver_data = srmconsp;
+
+ srmconsp->tty = tty;
+ srmconsp->timer.function = srmcons_receive_chars;
+ srmconsp->timer.data = (unsigned long)srmconsp;
+ srmconsp->timer.expires = jiffies + 10;
+ add_timer(&srmconsp->timer);
+ }
+
+ spin_unlock_irqrestore(&srmconsp->lock, flags);
+
+ return 0;
+}
+
+static void
+srmcons_close(struct tty_struct *tty, struct file *filp)
+{
+ struct srmcons_private *srmconsp = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&srmconsp->lock, flags);
+
+ if (tty->count == 1) {
+ srmconsp->tty = NULL;
+ del_timer(&srmconsp->timer);
+ }
+
+ spin_unlock_irqrestore(&srmconsp->lock, flags);
+}
+
+
+static struct tty_driver *srmcons_driver;
+
+static struct tty_operations srmcons_ops = {
+ .open = srmcons_open,
+ .close = srmcons_close,
+ .write = srmcons_write,
+ .write_room = srmcons_write_room,
+ .chars_in_buffer= srmcons_chars_in_buffer,
+};
+
+static int __init
+srmcons_init(void)
+{
+ if (srm_is_registered_console) {
+ struct tty_driver *driver;
+ int err;
+
+ driver = alloc_tty_driver(MAX_SRM_CONSOLE_DEVICES);
+ if (!driver)
+ return -ENOMEM;
+ driver->driver_name = "srm";
+ driver->name = "srm";
+ driver->major = 0; /* dynamic */
+ driver->minor_start = 0;
+ driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ driver->subtype = SYSTEM_TYPE_SYSCONS;
+ driver->init_termios = tty_std_termios;
+ tty_set_operations(driver, &srmcons_ops);
+ err = tty_register_driver(driver);
+ if (err) {
+ put_tty_driver(driver);
+ return err;
+ }
+ srmcons_driver = driver;
+ return 0; /* tty driver registered successfully */
+ }
+
+ return -ENODEV; /* SRM console was never registered */
+}
+
+module_init(srmcons_init);
+
+
+/*
+ * The console driver
+ */
+static void
+srm_console_write(struct console *co, const char *s, unsigned count)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&srmcons_callback_lock, flags);
+ srmcons_do_write(NULL, s, count);
+ spin_unlock_irqrestore(&srmcons_callback_lock, flags);
+}
+
+static struct tty_driver *
+srm_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return srmcons_driver;
+}
+
+static int __init
+srm_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+static struct console srmcons = {
+ .name = "srm",
+ .write = srm_console_write,
+ .device = srm_console_device,
+ .setup = srm_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+void __init
+register_srm_console(void)
+{
+ if (!srm_is_registered_console) {
+ callback_open_console();
+ register_console(&srmcons);
+ srm_is_registered_console = 1;
+ }
+}
+
+void __init
+unregister_srm_console(void)
+{
+ if (srm_is_registered_console) {
+ callback_close_console();
+ unregister_console(&srmcons);
+ srm_is_registered_console = 0;
+ }
+}
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
new file mode 100644
index 000000000000..145dcde143ae
--- /dev/null
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -0,0 +1,326 @@
+/*
+ * linux/arch/alpha/kernel/sys_alcor.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the ALCOR and XLT (XL-300/366/433).
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/mmu_context.h>
+#include <asm/irq.h>
+#include <asm/pgtable.h>
+#include <asm/core_cia.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note mask bit is true for ENABLED irqs. */
+static unsigned long cached_irq_mask;
+
+static inline void
+alcor_update_irq_hw(unsigned long mask)
+{
+ *(vuip)GRU_INT_MASK = mask;
+ mb();
+}
+
+static inline void
+alcor_enable_irq(unsigned int irq)
+{
+ alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+}
+
+static void
+alcor_disable_irq(unsigned int irq)
+{
+ alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+}
+
+static void
+alcor_mask_and_ack_irq(unsigned int irq)
+{
+ alcor_disable_irq(irq);
+
+ /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
+ *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+ *(vuip)GRU_INT_CLEAR = 0; mb();
+}
+
+static unsigned int
+alcor_startup_irq(unsigned int irq)
+{
+ alcor_enable_irq(irq);
+ return 0;
+}
+
+static void
+alcor_isa_mask_and_ack_irq(unsigned int irq)
+{
+ i8259a_mask_and_ack_irq(irq);
+
+ /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
+ *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
+ *(vuip)GRU_INT_CLEAR = 0; mb();
+}
+
+static void
+alcor_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ alcor_enable_irq(irq);
+}
+
+static struct hw_interrupt_type alcor_irq_type = {
+ .typename = "ALCOR",
+ .startup = alcor_startup_irq,
+ .shutdown = alcor_disable_irq,
+ .enable = alcor_enable_irq,
+ .disable = alcor_disable_irq,
+ .ack = alcor_mask_and_ack_irq,
+ .end = alcor_end_irq,
+};
+
+static void
+alcor_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary register of the GRU */
+ pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS;
+
+ /*
+ * Now for every possible bit set, work through them and call
+ * the appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i == 31) {
+ isa_device_interrupt(vector, regs);
+ } else {
+ handle_irq(16 + i, regs);
+ }
+ }
+}
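+
+/*
+ * Bit-scan sketch for the dispatch loop above: with pld == 0x90,
+ * ffz(~pld) returns 4 (the lowest set bit), irq 16+4 is handled, and
+ * "pld &= pld - 1" clears that bit leaving 0x80; the next pass
+ * handles bit 7 and the loop ends once pld reaches 0.
+ */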
+
+static void __init
+alcor_init_irq(void)
+{
+ long i;
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = srm_device_interrupt;
+
+ *(vuip)GRU_INT_MASK = 0; mb(); /* all disabled */
+ *(vuip)GRU_INT_EDGE = 0; mb(); /* all are level */
+ *(vuip)GRU_INT_HILO = 0x80000000U; mb(); /* ISA only HI */
+ *(vuip)GRU_INT_CLEAR = 0; mb(); /* all clear */
+
+ for (i = 16; i < 48; ++i) {
+ /* On Alcor, at least, lines 20..30 are not connected
+ and can generate spurious interrupts if we turn them
+ on during IRQ probing. */
+ if (i >= 16+20 && i <= 16+30)
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &alcor_irq_type;
+ }
+ i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+
+ init_i8259a_irqs();
+ common_init_isa_dma();
+
+ setup_irq(16+31, &isa_cascade_irqaction);
+}
+
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary @ GRU_INT_REQ:
+ * Bit Meaning
+ * 0 Interrupt Line A from slot 2
+ * 1 Interrupt Line B from slot 2
+ * 2 Interrupt Line C from slot 2
+ * 3 Interrupt Line D from slot 2
+ * 4 Interrupt Line A from slot 1
+ * 5 Interrupt line B from slot 1
+ * 6 Interrupt Line C from slot 1
+ * 7 Interrupt Line D from slot 1
+ * 8 Interrupt Line A from slot 0
+ * 9 Interrupt Line B from slot 0
+ *10 Interrupt Line C from slot 0
+ *11 Interrupt Line D from slot 0
+ *12 Interrupt Line A from slot 4
+ *13 Interrupt Line B from slot 4
+ *14 Interrupt Line C from slot 4
+ *15 Interrupt Line D from slot 4
+ *16 Interrupt Line A from slot 3
+ *17 Interrupt Line B from slot 3
+ *18 Interrupt Line C from slot 3
+ *19 Interrupt Line D from slot 3
+ *20-30 Reserved
+ *31 EISA interrupt
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot Device
+ * 6 built-in TULIP (XLT only)
+ * 7 PCI on board slot 0
+ * 8 PCI on board slot 3
+ * 9 PCI on board slot 4
+ * 10 PCEB (PCI-EISA bridge)
+ * 11 PCI on board slot 2
+ * 12 PCI on board slot 1
+ *
+ *
+ * This two-layered interrupt approach means that we allocate IRQ 16 and
+ * above for PCI interrupts. The IRQ relates to which bit the interrupt
+ * comes in on. This makes interrupt processing much easier.
+ */
+
+static int __init
+alcor_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[7][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ /* note: IDSEL 17 is XLT only */
+ {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */
+ { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 18, slot 0 */
+ {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 19, slot 3 */
+ {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 20, slot 4 */
+ { -1, -1, -1, -1, -1}, /* IdSel 21, PCEB */
+ { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 2 */
+ { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */
+ };
+ const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
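+
+/*
+ * Lookup sketch (assuming the usual Alpha COMMON_TABLE_LOOKUP helper,
+ * which range-checks slot/pin and indexes irq_tab[slot - min_idsel][pin]):
+ * a device at PCI slot 7 (IdSel 18, on-board slot 0) asserting INTA
+ * (pin == 1) maps to irq_tab[7 - 6][1] == 16+8 -- GRU summary bit 8,
+ * "Interrupt Line A from slot 0".
+ */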
+
+static void
+alcor_kill_arch(int mode)
+{
+ cia_kill_arch(mode);
+
+#ifndef ALPHA_RESTORE_SRM_SETUP
+ switch(mode) {
+ case LINUX_REBOOT_CMD_RESTART:
+ /* Who said DEC engineers have no sense of humor? ;-) */
+ if (alpha_using_srm) {
+ *(vuip) GRU_RESET = 0x0000dead;
+ mb();
+ }
+ break;
+ case LINUX_REBOOT_CMD_HALT:
+ break;
+ case LINUX_REBOOT_CMD_POWER_OFF:
+ break;
+ }
+
+ halt();
+#endif
+}
+
+static void __init
+alcor_init_pci(void)
+{
+ struct pci_dev *dev;
+
+ cia_init_pci();
+
+ /*
+ * Now we can look to see if we are really running on an XLT-type
+ * motherboard, by looking for a 21040 TULIP in slot 6, which is
+ * built into XLT and BRET/MAVERICK, but not available on ALCOR.
+ */
+ dev = pci_find_device(PCI_VENDOR_ID_DEC,
+ PCI_DEVICE_ID_DEC_TULIP,
+ NULL);
+ if (dev && dev->devfn == PCI_DEVFN(6,0)) {
+ alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS;
+ printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n",
+ __FUNCTION__);
+ }
+}
+
+
+/*
+ * The System Vectors
+ */
+
+struct alpha_machine_vector alcor_mv __initmv = {
+ .vector_name = "Alcor",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_CIA_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = alcor_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = alcor_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alcor_init_pci,
+ .kill_arch = alcor_kill_arch,
+ .pci_map_irq = alcor_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .cia = {
+ .gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS
+ }}
+};
+ALIAS_MV(alcor)
+
+struct alpha_machine_vector xlt_mv __initmv = {
+ .vector_name = "XLT",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_CIA_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = alcor_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = alcor_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alcor_init_pci,
+ .kill_arch = alcor_kill_arch,
+ .pci_map_irq = alcor_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .cia = {
+ .gru_int_req_bits = XLT_GRU_INT_REQ_BITS
+ }}
+};
+
+/* No alpha_mv alias for XLT, since we compile it in unconditionally
+ with ALCOR; setup_arch knows how to cope. */
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
new file mode 100644
index 000000000000..8e3374d34c95
--- /dev/null
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -0,0 +1,448 @@
+/*
+ * linux/arch/alpha/kernel/sys_cabriolet.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999, 2000 Richard Henderson
+ *
+ * Code supporting the Cabriolet (AlphaPC64), EB66+, and EB164,
+ * PC164 and LX164.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_apecs.h>
+#include <asm/core_cia.h>
+#include <asm/core_lca.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note mask bit is true for DISABLED irqs. */
+static unsigned long cached_irq_mask = ~0UL;
+
+static inline void
+cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
+{
+ int ofs = (irq - 16) / 8;
+ outb(mask >> (16 + ofs * 8), 0x804 + ofs);
+}
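+
+/*
+ * Worked example for the register split above: irqs 16..23 map to the
+ * mask byte at 0x804, 24..31 to 0x805, 32..34 to 0x806.  For irq 30,
+ * ofs = (30 - 16) / 8 = 1, so the byte "mask >> 24" (mask bits 24..31)
+ * is written to port 0x805.
+ */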
+
+static inline void
+cabriolet_enable_irq(unsigned int irq)
+{
+ cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+}
+
+static void
+cabriolet_disable_irq(unsigned int irq)
+{
+ cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+}
+
+static unsigned int
+cabriolet_startup_irq(unsigned int irq)
+{
+ cabriolet_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+cabriolet_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ cabriolet_enable_irq(irq);
+}
+
+static struct hw_interrupt_type cabriolet_irq_type = {
+ .typename = "CABRIOLET",
+ .startup = cabriolet_startup_irq,
+ .shutdown = cabriolet_disable_irq,
+ .enable = cabriolet_enable_irq,
+ .disable = cabriolet_disable_irq,
+ .ack = cabriolet_disable_irq,
+ .end = cabriolet_end_irq,
+};
+
+static void
+cabriolet_device_interrupt(unsigned long v, struct pt_regs *r)
+{
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary registers */
+ pld = inb(0x804) | (inb(0x805) << 8) | (inb(0x806) << 16);
+
+ /*
+ * Now for every possible bit set, work through them and call
+ * the appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i == 4) {
+ isa_device_interrupt(v, r);
+ } else {
+ handle_irq(16 + i, r);
+ }
+ }
+}
+
+static void __init
+common_init_irq(void (*srm_dev_int)(unsigned long v, struct pt_regs *r))
+{
+ init_i8259a_irqs();
+
+ if (alpha_using_srm) {
+ alpha_mv.device_interrupt = srm_dev_int;
+ init_srm_irqs(35, 0);
+ }
+ else {
+ long i;
+
+ outb(0xff, 0x804);
+ outb(0xff, 0x805);
+ outb(0xff, 0x806);
+
+ for (i = 16; i < 35; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &cabriolet_irq_type;
+ }
+ }
+
+ common_init_isa_dma();
+ setup_irq(16+4, &isa_cascade_irqaction);
+}
+
+#ifndef CONFIG_ALPHA_PC164
+static void __init
+cabriolet_init_irq(void)
+{
+ common_init_irq(srm_device_interrupt);
+}
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
+/* In theory, the PC164 has the same interrupt hardware as the other
+ Cabriolet based systems. However, something got screwed up late
+ in the development cycle which broke the interrupt masking hardware.
+ Repeat, it is not possible to mask and ack interrupts. At all.
+
+ In an attempt to work around this, while processing interrupts,
+ we do not allow the IPL to drop below what it is currently. This
+ prevents the possibility of recursion.
+
+ ??? Another option might be to force all PCI devices to use edge
+ triggered rather than level triggered interrupts. That might be
+ too invasive though. */
+
+static void
+pc164_srm_device_interrupt(unsigned long v, struct pt_regs *r)
+{
+ __min_ipl = getipl();
+ srm_device_interrupt(v, r);
+ __min_ipl = 0;
+}
+
+static void
+pc164_device_interrupt(unsigned long v, struct pt_regs *r)
+{
+ __min_ipl = getipl();
+ cabriolet_device_interrupt(v, r);
+ __min_ipl = 0;
+}
+
+static void __init
+pc164_init_irq(void)
+{
+ common_init_irq(pc164_srm_device_interrupt);
+}
+#endif
+
+/*
+ * The EB66+ is very similar to the EB66 except that it does not have
+ * the on-board NCR and Tulip chips. In the code below, I have used
+ * slot number to refer to the id select line and *not* the slot
+ * number used in the EB66+ documentation. However, in the table,
+ * I've given the slot number, the id select line and the Jxx number
+ * that's printed on the board. The interrupt pins from the PCI slots
+ * are wired into 3 interrupt summary registers at 0x804, 0x805 and
+ * 0x806 ISA.
+ *
+ * In the table, -1 means don't assign an IRQ number. This is usually
+ * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
+ */
+
+static inline int __init
+eb66p_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[5][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
+ {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
+ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
+ {16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 9, slot 2, J27 */
+ {16+3, 16+3, 16+8, 16+12, 16+6} /* IdSel 10, slot 3, J28 */
+ };
+ const long min_idsel = 6, max_idsel = 10, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+
+/*
+ * The AlphaPC64 is very similar to the EB66+ except that its slots
+ * are numbered differently. In the code below, I have used slot
+ * number to refer to the id select line and *not* the slot number
+ * used in the AlphaPC64 documentation. However, in the table, I've
+ * given the slot number, the id select line and the Jxx number that's
+ * printed on the board. The interrupt pins from the PCI slots are
+ * wired into 3 interrupt summary registers at 0x804, 0x805 and 0x806
+ * ISA.
+ *
+ * In the table, -1 means don't assign an IRQ number. This is usually
+ * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
+ */
+
+static inline int __init
+cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[5][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
+ { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
+ { 16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J20 */
+ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
+ { 16+3, 16+3, 16+8, 16+12, 16+16} /* IdSel 9, slot 3, J22 */
+ };
+ const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static inline void __init
+cabriolet_init_pci(void)
+{
+ common_init_pci();
+ ns87312_enable_ide(0x398);
+}
+
+static inline void __init
+cia_cab_init_pci(void)
+{
+ cia_init_pci();
+ ns87312_enable_ide(0x398);
+}
+
+/*
+ * The PC164 and LX164 have 19 PCI interrupts, four from each of the four
+ * PCI slots, the SIO, PCI/IDE, and USB.
+ *
+ * Each of the interrupts can be individually masked. This is
+ * accomplished by setting the appropriate bit in the mask register.
+ * A bit is set by writing a "1" to the desired position in the mask
+ * register and cleared by writing a "0". There are 3 mask registers
+ * located at ISA address 804h, 805h and 806h.
+ *
+ * An I/O read at ISA address 804h, 805h, 806h will return the
+ * state of the 19 PCI interrupts and not the state of the MASKED
+ * interrupts.
+ *
+ * Note: A write to I/O ports 804h, 805h, and 806h updates the
+ * mask register.
+ *
+ *
+ * ISA DATA<7:0>
+ * ISA +--------------------------------------------------------------+
+ * ADDRESS | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * +==============================================================+
+ * 0x804 | INTB0 | USB | IDE | SIO | INTA3 |INTA2 | INTA1 | INTA0 |
+ * +--------------------------------------------------------------+
+ * 0x805 | INTD0 | INTC3 | INTC2 | INTC1 | INTC0 |INTB3 | INTB2 | INTB1 |
+ * +--------------------------------------------------------------+
+ * 0x806 | Rsrv | Rsrv | Rsrv | Rsrv | Rsrv |INTD3 | INTD2 | INTD1 |
+ * +--------------------------------------------------------------+
+ * * Rsrv = reserved bits
+ * Note: The mask register is write-only.
+ *
+ * IdSel
+ * 5 32 bit PCI option slot 2
+ * 6 64 bit PCI option slot 0
+ * 7 64 bit PCI option slot 1
+ * 8 Saturn I/O
+ * 9 32 bit PCI option slot 3
+ * 10 USB
+ * 11 IDE
+ *
+ */
+
+static inline int __init
+alphapc164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[7][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
+ { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
+ { 16+1, 16+1, 16+8, 16+12, 16+16}, /* IdSel 7, slot 1, J26 */
+ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
+ { 16+3, 16+3, 16+10, 16+14, 16+18}, /* IdSel 9, slot 3, J19 */
+ { 16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 10, USB */
+ { 16+5, 16+5, 16+5, 16+5, 16+5} /* IdSel 11, IDE */
+ };
+ const long min_idsel = 5, max_idsel = 11, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static inline void __init
+alphapc164_init_pci(void)
+{
+ cia_init_pci();
+ SMC93x_Init();
+}
+
+
+/*
+ * The System Vector
+ */
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
+struct alpha_machine_vector cabriolet_mv __initmv = {
+ .vector_name = "Cabriolet",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_APECS_IO,
+ .machine_check = apecs_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cabriolet_init_pci,
+ .pci_map_irq = cabriolet_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+#ifndef CONFIG_ALPHA_EB64P
+ALIAS_MV(cabriolet)
+#endif
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164)
+struct alpha_machine_vector eb164_mv __initmv = {
+ .vector_name = "EB164",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_CIA_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_cab_init_pci,
+ .kill_arch = cia_kill_arch,
+ .pci_map_irq = cabriolet_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(eb164)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P)
+struct alpha_machine_vector eb66p_mv __initmv = {
+ .vector_name = "EB66+",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_LCA_IO,
+ .machine_check = lca_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cabriolet_init_pci,
+ .pci_map_irq = eb66p_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(eb66p)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164)
+struct alpha_machine_vector lx164_mv __initmv = {
+ .vector_name = "LX164",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_PYXIS_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
+
+ .nr_irqs = 35,
+ .device_interrupt = cabriolet_device_interrupt,
+
+ .init_arch = pyxis_init_arch,
+ .init_irq = cabriolet_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alphapc164_init_pci,
+ .kill_arch = cia_kill_arch,
+ .pci_map_irq = alphapc164_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(lx164)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
+struct alpha_machine_vector pc164_mv __initmv = {
+ .vector_name = "PC164",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_CIA_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 35,
+ .device_interrupt = pc164_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = pc164_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alphapc164_init_pci,
+ .kill_arch = cia_kill_arch,
+ .pci_map_irq = alphapc164_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(pc164)
+#endif
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
new file mode 100644
index 000000000000..9e36b07fa940
--- /dev/null
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -0,0 +1,689 @@
+/*
+ * linux/arch/alpha/kernel/sys_dp264.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996, 1999 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Modified by Christopher C. Chimelis, 2001 to
+ * add support for the addition of Shark to the
+ * Tsunami family.
+ *
+ * Code supporting the DP264 (EV6+TSUNAMI).
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_tsunami.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note mask bit is true for ENABLED irqs. */
+static unsigned long cached_irq_mask;
+/* dp264 boards handle at most four CPUs */
+static unsigned long cpu_irq_affinity[4] = { 0UL, 0UL, 0UL, 0UL };
+
+DEFINE_SPINLOCK(dp264_irq_lock);
+
+static void
+tsunami_update_irq_hw(unsigned long mask)
+{
+ register tsunami_cchip *cchip = TSUNAMI_cchip;
+ unsigned long isa_enable = 1UL << 55;
+ register int bcpu = boot_cpuid;
+
+#ifdef CONFIG_SMP
+ volatile unsigned long *dim0, *dim1, *dim2, *dim3;
+ unsigned long mask0, mask1, mask2, mask3, dummy;
+
+ mask &= ~isa_enable;
+ mask0 = mask & cpu_irq_affinity[0];
+ mask1 = mask & cpu_irq_affinity[1];
+ mask2 = mask & cpu_irq_affinity[2];
+ mask3 = mask & cpu_irq_affinity[3];
+
+ if (bcpu == 0) mask0 |= isa_enable;
+ else if (bcpu == 1) mask1 |= isa_enable;
+ else if (bcpu == 2) mask2 |= isa_enable;
+ else mask3 |= isa_enable;
+
+ dim0 = &cchip->dim0.csr;
+ dim1 = &cchip->dim1.csr;
+ dim2 = &cchip->dim2.csr;
+ dim3 = &cchip->dim3.csr;
+ if (!cpu_possible(0)) dim0 = &dummy;
+ if (!cpu_possible(1)) dim1 = &dummy;
+ if (!cpu_possible(2)) dim2 = &dummy;
+ if (!cpu_possible(3)) dim3 = &dummy;
+
+ *dim0 = mask0;
+ *dim1 = mask1;
+ *dim2 = mask2;
+ *dim3 = mask3;
+ mb();
+ *dim0;
+ *dim1;
+ *dim2;
+ *dim3;
+#else
+ volatile unsigned long *dimB;
+ if (bcpu == 0) dimB = &cchip->dim0.csr;
+ else if (bcpu == 1) dimB = &cchip->dim1.csr;
+ else if (bcpu == 2) dimB = &cchip->dim2.csr;
+ else dimB = &cchip->dim3.csr;
+
+ *dimB = mask | isa_enable;
+ mb();
+ *dimB;
+#endif
+}
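+
+/*
+ * Composition sketch for the SMP path above: each CPU's DIMx register
+ * gets the global enable mask ANDed with that CPU's affinity mask,
+ * with the ISA summary bit (bit 55) forced on only for the boot CPU.
+ * E.g. with irq 20 enabled and CPUs 0 and 1 in its affinity, DIM0 and
+ * DIM1 have bit 20 set while DIM2 and DIM3 do not.
+ */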
+
+static void
+dp264_enable_irq(unsigned int irq)
+{
+ spin_lock(&dp264_irq_lock);
+ cached_irq_mask |= 1UL << irq;
+ tsunami_update_irq_hw(cached_irq_mask);
+ spin_unlock(&dp264_irq_lock);
+}
+
+static void
+dp264_disable_irq(unsigned int irq)
+{
+ spin_lock(&dp264_irq_lock);
+ cached_irq_mask &= ~(1UL << irq);
+ tsunami_update_irq_hw(cached_irq_mask);
+ spin_unlock(&dp264_irq_lock);
+}
+
+static unsigned int
+dp264_startup_irq(unsigned int irq)
+{
+ dp264_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+dp264_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ dp264_enable_irq(irq);
+}
+
+static void
+clipper_enable_irq(unsigned int irq)
+{
+ spin_lock(&dp264_irq_lock);
+ cached_irq_mask |= 1UL << (irq - 16);
+ tsunami_update_irq_hw(cached_irq_mask);
+ spin_unlock(&dp264_irq_lock);
+}
+
+static void
+clipper_disable_irq(unsigned int irq)
+{
+ spin_lock(&dp264_irq_lock);
+ cached_irq_mask &= ~(1UL << (irq - 16));
+ tsunami_update_irq_hw(cached_irq_mask);
+ spin_unlock(&dp264_irq_lock);
+}
+
+static unsigned int
+clipper_startup_irq(unsigned int irq)
+{
+ clipper_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+clipper_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ clipper_enable_irq(irq);
+}
+
+static void
+cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < 4; cpu++) {
+ unsigned long aff = cpu_irq_affinity[cpu];
+ if (cpu_isset(cpu, affinity))
+ aff |= 1UL << irq;
+ else
+ aff &= ~(1UL << irq);
+ cpu_irq_affinity[cpu] = aff;
+ }
+}
+
+static void
+dp264_set_affinity(unsigned int irq, cpumask_t affinity)
+{
+ spin_lock(&dp264_irq_lock);
+ cpu_set_irq_affinity(irq, affinity);
+ tsunami_update_irq_hw(cached_irq_mask);
+ spin_unlock(&dp264_irq_lock);
+}
+
+static void
+clipper_set_affinity(unsigned int irq, cpumask_t affinity)
+{
+ spin_lock(&dp264_irq_lock);
+ cpu_set_irq_affinity(irq - 16, affinity);
+ tsunami_update_irq_hw(cached_irq_mask);
+ spin_unlock(&dp264_irq_lock);
+}
+
+static struct hw_interrupt_type dp264_irq_type = {
+ .typename = "DP264",
+ .startup = dp264_startup_irq,
+ .shutdown = dp264_disable_irq,
+ .enable = dp264_enable_irq,
+ .disable = dp264_disable_irq,
+ .ack = dp264_disable_irq,
+ .end = dp264_end_irq,
+ .set_affinity = dp264_set_affinity,
+};
+
+static struct hw_interrupt_type clipper_irq_type = {
+ .typename = "CLIPPER",
+ .startup = clipper_startup_irq,
+ .shutdown = clipper_disable_irq,
+ .enable = clipper_enable_irq,
+ .disable = clipper_disable_irq,
+ .ack = clipper_disable_irq,
+ .end = clipper_end_irq,
+ .set_affinity = clipper_set_affinity,
+};
+
+static void
+dp264_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+#if 1
+ printk("dp264_device_interrupt: NOT IMPLEMENTED YET!! \n");
+#else
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary register of TSUNAMI */
+ pld = TSUNAMI_cchip->dir0.csr;
+
+ /*
+ * Now for every possible bit set, work through them and call
+ * the appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i == 55)
+ isa_device_interrupt(vector, regs);
+ else
+ handle_irq(16 + i, regs);
+#if 0
+ TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
+ tmp = TSUNAMI_cchip->dir0.csr;
+#endif
+ }
+#endif
+}
+
+static void
+dp264_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ irq = (vector - 0x800) >> 4;
+
+ /*
+ * The SRM console reports PCI interrupts with a vector calculated by:
+ *
+ * 0x900 + (0x10 * DRIR-bit)
+ *
+ * So bit 16 shows up as IRQ 32, etc.
+ *
+ * On DP264/BRICK/MONET, we adjust it down by 16 because at least
+ * that many of the low order bits of the DRIR are not used, and
+ * so we don't count them.
+ */
+ if (irq >= 32)
+ irq -= 16;
+
+ handle_irq(irq, regs);
+}
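+
+/*
+ * Worked example of the vector arithmetic above: DRIR bit 20 (slot 2
+ * INTD in the summary below) is reported as vector
+ * 0x900 + 0x10 * 20 = 0xa40, so irq = (0xa40 - 0x800) >> 4 = 36;
+ * since 36 >= 32 it is adjusted down to 20 -- exactly the 16+4 that
+ * dp264_map_irq assigns to slot 2 INTD.
+ */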
+
+static void
+clipper_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ irq = (vector - 0x800) >> 4;
+
+/*
+ * The SRM console reports PCI interrupts with a vector calculated by:
+ *
+ * 0x900 + (0x10 * DRIR-bit)
+ *
+ * So bit 16 shows up as IRQ 32, etc.
+ *
+ * CLIPPER uses bits 8-47 for PCI interrupts, so we do not need
+ * to scale down the reported vector; we just use it.
+ *
+ * E.g. IRQ 24 is DRIR bit 8, and so on.
+ */
+ handle_irq(irq, regs);
+}
+
+static void __init
+init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+{
+ long i;
+ for (i = imin; i <= imax; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = ops;
+ }
+}
+
+static void __init
+dp264_init_irq(void)
+{
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
+ outb(0, DMA2_MASK_REG);
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = dp264_srm_device_interrupt;
+
+ tsunami_update_irq_hw(0);
+
+ init_i8259a_irqs();
+ init_tsunami_irqs(&dp264_irq_type, 16, 47);
+}
+
+static void __init
+clipper_init_irq(void)
+{
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
+ outb(0, DMA2_MASK_REG);
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = clipper_srm_device_interrupt;
+
+ tsunami_update_irq_hw(0);
+
+ init_i8259a_irqs();
+ init_tsunami_irqs(&clipper_irq_type, 24, 63);
+}
+
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary @ TSUNAMI_CSR_DIM0:
+ * Bit Meaning
+ * 0-17 Unused
+ *18 Interrupt SCSI B (Adaptec 7895 builtin)
+ *19 Interrupt SCSI A (Adaptec 7895 builtin)
+ *20 Interrupt Line D from slot 2 PCI0
+ *21 Interrupt Line C from slot 2 PCI0
+ *22 Interrupt Line B from slot 2 PCI0
+ *23 Interrupt Line A from slot 2 PCI0
+ *24 Interrupt Line D from slot 1 PCI0
+ *25 Interrupt Line C from slot 1 PCI0
+ *26 Interrupt Line B from slot 1 PCI0
+ *27 Interrupt Line A from slot 1 PCI0
+ *28 Interrupt Line D from slot 0 PCI0
+ *29 Interrupt Line C from slot 0 PCI0
+ *30 Interrupt Line B from slot 0 PCI0
+ *31 Interrupt Line A from slot 0 PCI0
+ *
+ *32 Interrupt Line D from slot 3 PCI1
+ *33 Interrupt Line C from slot 3 PCI1
+ *34 Interrupt Line B from slot 3 PCI1
+ *35 Interrupt Line A from slot 3 PCI1
+ *36 Interrupt Line D from slot 2 PCI1
+ *37 Interrupt Line C from slot 2 PCI1
+ *38 Interrupt Line B from slot 2 PCI1
+ *39 Interrupt Line A from slot 2 PCI1
+ *40 Interrupt Line D from slot 1 PCI1
+ *41 Interrupt Line C from slot 1 PCI1
+ *42 Interrupt Line B from slot 1 PCI1
+ *43 Interrupt Line A from slot 1 PCI1
+ *44 Interrupt Line D from slot 0 PCI1
+ *45 Interrupt Line C from slot 0 PCI1
+ *46 Interrupt Line B from slot 0 PCI1
+ *47 Interrupt Line A from slot 0 PCI1
+ *48-52 Unused
+ *53 PCI0 NMI (from Cypress)
+ *54 PCI0 SMI INT (from Cypress)
+ *55 PCI0 ISA Interrupt (from Cypress)
+ *56-60 Unused
+ *61 PCI1 Bus Error
+ *62 PCI0 Bus Error
+ *63 Reserved
+ *
+ * IdSel
+ * 5 Cypress Bridge I/O
+ * 6 SCSI Adaptec builtin
+ * 7 64 bit PCI option slot 0 (all busses)
+ * 8 64 bit PCI option slot 1 (all busses)
+ * 9 64 bit PCI option slot 2 (all busses)
+ * 10 64 bit PCI option slot 3 (not bus 0)
+ */
+
+static int __init
+dp264_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[6][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */
+ { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
+ { 16+15, 16+15, 16+14, 16+13, 16+12}, /* IdSel 7 slot 0 */
+ { 16+11, 16+11, 16+10, 16+ 9, 16+ 8}, /* IdSel 8 slot 1 */
+ { 16+ 7, 16+ 7, 16+ 6, 16+ 5, 16+ 4}, /* IdSel 9 slot 2 */
+ { 16+ 3, 16+ 3, 16+ 2, 16+ 1, 16+ 0} /* IdSel 10 slot 3 */
+ };
+ const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;
+
+ struct pci_controller *hose = dev->sysdata;
+ int irq = COMMON_TABLE_LOOKUP;
+
+ if (irq > 0) {
+ irq += 16 * hose->index;
+ } else {
+ /* ??? The Contaq IDE controller on the ISA bridge uses
+ "legacy" interrupts 14 and 15. I don't know if anything
+ can wind up at the same slot+pin on hose1, so we'll
+ just have to trust whatever value the console might
+ have assigned. */
+
+ u8 irq8;
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq8);
+ irq = irq8;
+ }
+
+ return irq;
+}
+
+static int __init
+monet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[13][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */
+ { -1, -1, -1, -1, -1}, /* IdSel 4 unused */
+ { -1, -1, -1, -1, -1}, /* IdSel 5 unused */
+ { 47, 47, 47, 47, 47}, /* IdSel 6 SCSI PCI1 */
+ { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
+ { -1, -1, -1, -1, -1}, /* IdSel 8 P2P PCI1 */
+#if 1
+ { 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/
+ { 24, 24, 25, 26, 27}, /* IdSel 15 slot 5 PCI2*/
+#else
+ { -1, -1, -1, -1, -1}, /* IdSel 9 unused */
+ { -1, -1, -1, -1, -1}, /* IdSel 10 unused */
+#endif
+ { 40, 40, 41, 42, 43}, /* IdSel 11 slot 1 PCI0*/
+ { 36, 36, 37, 38, 39}, /* IdSel 12 slot 2 PCI0*/
+ { 32, 32, 33, 34, 35}, /* IdSel 13 slot 3 PCI0*/
+ { 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/
+ { 24, 24, 25, 26, 27} /* IdSel 15 slot 5 PCI2*/
+ };
+ const long min_idsel = 3, max_idsel = 15, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static u8 __init
+monet_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ struct pci_controller *hose = dev->sysdata;
+ int slot, pin = *pinp;
+
+ if (!dev->bus->parent) {
+ slot = PCI_SLOT(dev->devfn);
+ }
+ /* Check for the built-in bridge on hose 1. */
+ else if (hose->index == 1 && PCI_SLOT(dev->bus->self->devfn) == 8) {
+ slot = PCI_SLOT(dev->devfn);
+ } else {
+ /* Must be a card-based bridge. */
+ do {
+ /* Check for built-in bridge on hose 1. */
+ if (hose->index == 1 &&
+ PCI_SLOT(dev->bus->self->devfn) == 8) {
+ slot = PCI_SLOT(dev->devfn);
+ break;
+ }
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
+
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ /* Slot of the next bridge. */
+ slot = PCI_SLOT(dev->devfn);
+ } while (dev->bus->self);
+ }
+ *pinp = pin;
+ return slot;
+}
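+
+/*
+ * Swizzle sketch (assuming bridge_swizzle implements the conventional
+ * PCI rule, pin' = ((pin - 1 + slot) % 4) + 1): a device in slot 3
+ * behind a card-based bridge asserting INTA (pin 1) shows up at the
+ * bridge as INTD, since ((1 - 1 + 3) % 4) + 1 == 4; the walk then
+ * repeats one bridge up until it reaches the hose.
+ */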
+
+static int __init
+webbrick_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[13][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
+ { -1, -1, -1, -1, -1}, /* IdSel 8 unused */
+ { 29, 29, 29, 29, 29}, /* IdSel 9 21143 #1 */
+ { -1, -1, -1, -1, -1}, /* IdSel 10 unused */
+ { 30, 30, 30, 30, 30}, /* IdSel 11 21143 #2 */
+ { -1, -1, -1, -1, -1}, /* IdSel 12 unused */
+ { -1, -1, -1, -1, -1}, /* IdSel 13 unused */
+ { 35, 35, 34, 33, 32}, /* IdSel 14 slot 0 */
+ { 39, 39, 38, 37, 36}, /* IdSel 15 slot 1 */
+ { 43, 43, 42, 41, 40}, /* IdSel 16 slot 2 */
+ { 47, 47, 46, 45, 44}, /* IdSel 17 slot 3 */
+ };
+ const long min_idsel = 7, max_idsel = 17, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static int __init
+clipper_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[7][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
+ { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
+ { 16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 3 slot 3 */
+ { 16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 4 slot 4 */
+ { 16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 5 slot 5 */
+ { 16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 6 slot 6 */
+ { -1, -1, -1, -1, -1} /* IdSel 7 ISA Bridge */
+ };
+ const long min_idsel = 1, max_idsel = 7, irqs_per_slot = 5;
+
+ struct pci_controller *hose = dev->sysdata;
+ int irq = COMMON_TABLE_LOOKUP;
+
+ if (irq > 0)
+ irq += 16 * hose->index;
+
+ return irq;
+}
+
+static void __init
+dp264_init_pci(void)
+{
+ common_init_pci();
+ SMC669_Init(0);
+}
+
+static void __init
+monet_init_pci(void)
+{
+ common_init_pci();
+ SMC669_Init(1);
+ es1888_init();
+}
+
+static void __init
+webbrick_init_arch(void)
+{
+ tsunami_init_arch();
+
+ /* Tsunami caches 4 PTEs at a time; DS10 has only 1 hose. */
+ hose_head->sg_isa->align_entry = 4;
+ hose_head->sg_pci->align_entry = 4;
+}
+
+
+/*
+ * The System Vectors
+ */
+
+struct alpha_machine_vector dp264_mv __initmv = {
+ .vector_name = "DP264",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TSUNAMI_IO,
+ .machine_check = tsunami_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = dp264_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = dp264_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = dp264_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(dp264)
+
+struct alpha_machine_vector monet_mv __initmv = {
+ .vector_name = "Monet",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TSUNAMI_IO,
+ .machine_check = tsunami_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = dp264_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = monet_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = monet_map_irq,
+ .pci_swizzle = monet_swizzle,
+};
+
+struct alpha_machine_vector webbrick_mv __initmv = {
+ .vector_name = "Webbrick",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TSUNAMI_IO,
+ .machine_check = tsunami_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = webbrick_init_arch,
+ .init_irq = dp264_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = webbrick_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+
+struct alpha_machine_vector clipper_mv __initmv = {
+ .vector_name = "Clipper",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TSUNAMI_IO,
+ .machine_check = tsunami_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = clipper_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = clipper_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+
+/* Sharks strongly resemble Clipper, at least as far as interrupt
+ * routing goes, so we're using the same functions as Clipper does.
+ */
+
+struct alpha_machine_vector shark_mv __initmv = {
+ .vector_name = "Shark",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TSUNAMI_IO,
+ .machine_check = tsunami_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 64,
+ .device_interrupt = dp264_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = clipper_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = clipper_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+
+/* No alpha_mv alias for webbrick/monet/clipper, since we compile them
+ in unconditionally with DP264; setup_arch knows how to cope. */
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
new file mode 100644
index 000000000000..61a79c354f0b
--- /dev/null
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -0,0 +1,256 @@
+/*
+ * linux/arch/alpha/kernel/sys_eb64p.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the EB64+ and EB66.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_apecs.h>
+#include <asm/core_lca.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note mask bit is true for DISABLED irqs. */
+static unsigned int cached_irq_mask = -1;
+
+static inline void
+eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
+{
+ outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26));
+}
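+
+/* Worked example (illustrative only): disabling irq 18 sets bit 18 in
+   cached_irq_mask; since 18 < 24, the mask is shifted down 16 bits
+   and written to port 0x26, while irqs 24-31 go to port 0x27 with a
+   24-bit shift instead.  */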
+
+static inline void
+eb64p_enable_irq(unsigned int irq)
+{
+ eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+}
+
+static void
+eb64p_disable_irq(unsigned int irq)
+{
+ eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+}
+
+static unsigned int
+eb64p_startup_irq(unsigned int irq)
+{
+ eb64p_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+eb64p_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ eb64p_enable_irq(irq);
+}
+
+static struct hw_interrupt_type eb64p_irq_type = {
+ .typename = "EB64P",
+ .startup = eb64p_startup_irq,
+ .shutdown = eb64p_disable_irq,
+ .enable = eb64p_enable_irq,
+ .disable = eb64p_disable_irq,
+ .ack = eb64p_disable_irq,
+ .end = eb64p_end_irq,
+};
+
+static void
+eb64p_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary registers */
+ pld = inb(0x26) | (inb(0x27) << 8);
+
+ /*
+ * Now work through every bit that is set and call the
+ * appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+
+ if (i == 5) {
+ isa_device_interrupt(vector, regs);
+ } else {
+ handle_irq(16 + i, regs);
+ }
+ }
+}
+
+static void __init
+eb64p_init_irq(void)
+{
+ long i;
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
+ /*
+ * CABRIO SRM may not set variation correctly, so here we test
+ * the high word of the interrupt summary register for the RAZ
+ * bits, and hope that a true EB64+ would read all ones...
+ */
+ if (inw(0x806) != 0xffff) {
+ extern struct alpha_machine_vector cabriolet_mv;
+
+ printk("Detected Cabriolet: correcting HWRPB.\n");
+
+ hwrpb->sys_variation |= 2L << 10;
+ hwrpb_update_checksum(hwrpb);
+
+ alpha_mv = cabriolet_mv;
+ alpha_mv.init_irq();
+ return;
+ }
+#endif /* GENERIC */
+
+ outb(0xff, 0x26);
+ outb(0xff, 0x27);
+
+ init_i8259a_irqs();
+
+ for (i = 16; i < 32; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &eb64p_irq_type;
+ }
+
+ common_init_isa_dma();
+ setup_irq(16+5, &isa_cascade_irqaction);
+}
+
+/*
+ * PCI Fixup configuration.
+ *
+ * There are two 8 bit external summary registers as follows:
+ *
+ * Summary @ 0x26:
+ * Bit Meaning
+ * 0 Interrupt Line A from slot 0
+ * 1 Interrupt Line A from slot 1
+ * 2 Interrupt Line B from slot 0
+ * 3 Interrupt Line B from slot 1
+ * 4 Interrupt Line C from slot 0
+ * 5 Interrupt line from the two ISA PICs
+ * 6 Tulip
+ * 7 NCR SCSI
+ *
+ * Summary @ 0x27
+ * Bit Meaning
+ * 0 Interrupt Line C from slot 1
+ * 1 Interrupt Line D from slot 0
+ * 2 Interrupt Line D from slot 1
+ * 3 RAZ
+ * 4 RAZ
+ * 5 RAZ
+ * 6 RAZ
+ * 7 RAZ
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot Device
+ * 5 NCR SCSI controller
+ * 6 PCI on board slot 0
+ * 7 PCI on board slot 1
+ * 8 Intel SIO PCI-ISA bridge chip
+ * 9 Tulip - DECchip 21040 Ethernet controller
+ *
+ *
+ * This two-layered interrupt approach means that we allocate IRQ 16 and
+ * above for PCI interrupts. The IRQ relates to which bit the interrupt
+ * comes in on. This makes interrupt processing much easier.
+ */
+
+static int __init
+eb64p_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[5][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
+ {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */
+ {16+1, 16+1, 16+3, 16+8, 16+10}, /* IdSel 7, slot ?, ?? */
+ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
+ {16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 9, TULIP */
+ };
+ const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+
+/*
+ * The System Vector
+ */
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P)
+struct alpha_machine_vector eb64p_mv __initmv = {
+ .vector_name = "EB64+",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_APECS_IO,
+ .machine_check = apecs_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = eb64p_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = eb64p_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = eb64p_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(eb64p)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66)
+struct alpha_machine_vector eb66_mv __initmv = {
+ .vector_name = "EB66",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_LCA_IO,
+ .machine_check = lca_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = eb64p_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = eb64p_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .pci_map_irq = eb64p_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(eb66)
+#endif
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
new file mode 100644
index 000000000000..bd6e5f0e43c7
--- /dev/null
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -0,0 +1,242 @@
+/*
+ * linux/arch/alpha/kernel/sys_eiger.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996, 1999 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ * Copyright (C) 1999 Iain Grant
+ *
+ * Code supporting the EIGER (EV6+TSUNAMI).
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/pgtable.h>
+#include <asm/core_tsunami.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note that this interrupt code is identical to TAKARA. */
+
+/* Note mask bit is true for DISABLED irqs. */
+static unsigned long cached_irq_mask[2] = { -1, -1 };
+
+static inline void
+eiger_update_irq_hw(unsigned long irq, unsigned long mask)
+{
+ int regaddr;
+
+ mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
+ regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
+ outl(mask & 0xffff0000UL, regaddr);
+}
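+
+/* Worked example (illustrative): for irq 40, irq - 16 = 0x18, so the
+   mask is shifted right by 0x18 & 0x30 = 16 bits and written to
+   register 0x510 + ((0x18 >> 2) & 0x0c) = 0x514.  */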
+
+static inline void
+eiger_enable_irq(unsigned int irq)
+{
+ unsigned long mask;
+ mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
+ eiger_update_irq_hw(irq, mask);
+}
+
+static void
+eiger_disable_irq(unsigned int irq)
+{
+ unsigned long mask;
+ mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
+ eiger_update_irq_hw(irq, mask);
+}
+
+static unsigned int
+eiger_startup_irq(unsigned int irq)
+{
+ eiger_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+eiger_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ eiger_enable_irq(irq);
+}
+
+static struct hw_interrupt_type eiger_irq_type = {
+ .typename = "EIGER",
+ .startup = eiger_startup_irq,
+ .shutdown = eiger_disable_irq,
+ .enable = eiger_enable_irq,
+ .disable = eiger_disable_irq,
+ .ack = eiger_disable_irq,
+ .end = eiger_end_irq,
+};
+
+static void
+eiger_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ unsigned intstatus;
+
+ /*
+ * The PALcode will have passed us vectors 0x800 or 0x810,
+ * which are fairly arbitrary values and serve only to tell
+ * us whether an interrupt has come in on IRQ0 or IRQ1. If
+ * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
+ * probably ISA, but PCI interrupts can come through IRQ0
+ * as well if the interrupt controller isn't in accelerated
+ * mode.
+ *
+ * OTOH, the accelerator thing doesn't seem to be working
+ * overly well, so what we'll do instead is try directly
+ * examining the Master Interrupt Register to see if it's a
+ * PCI interrupt, and if _not_ then we'll pass it on to the
+ * ISA handler.
+ */
+
+ intstatus = inw(0x500) & 15;
+ if (intstatus) {
+ /*
+ * This is a PCI interrupt. Check each bit and
+ * dispatch an interrupt if it's set.
+ */
+
+ if (intstatus & 8) handle_irq(16+3, regs);
+ if (intstatus & 4) handle_irq(16+2, regs);
+ if (intstatus & 2) handle_irq(16+1, regs);
+ if (intstatus & 1) handle_irq(16+0, regs);
+ } else {
+ isa_device_interrupt(vector, regs);
+ }
+}
+
+static void
+eiger_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
+}
+
+static void __init
+eiger_init_irq(void)
+{
+ long i;
+
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
+ outb(0, DMA2_MASK_REG);
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = eiger_srm_device_interrupt;
+
+ for (i = 16; i < 128; i += 16)
+ eiger_update_irq_hw(i, -1);
+
+ init_i8259a_irqs();
+
+ for (i = 16; i < 128; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &eiger_irq_type;
+ }
+}
+
+static int __init
+eiger_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ u8 irq_orig;
+
+ /* The SRM console has already calculated the IRQ values for the
+    option cards. Since this works, let's just read the value the
+    console has already set and convert it to a value usable by Linux.
+
+    All the IRQ values generated by the console are greater than 0x90,
+    so we subtract 0x80, which is (0x90 - the allocated ISA IRQs). */
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig);
+
+ return irq_orig - 0x80;
+}
+
+static u8 __init
+eiger_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ struct pci_controller *hose = dev->sysdata;
+ int slot, pin = *pinp;
+ int bridge_count = 0;
+
+ /* Find the number of backplane bridges. */
+ int backplane = inw(0x502) & 0x0f;
+
+ switch (backplane)
+ {
+ case 0x00: bridge_count = 0; break; /* No bridges */
+ case 0x01: bridge_count = 1; break; /* 1 */
+ case 0x03: bridge_count = 2; break; /* 2 */
+ case 0x07: bridge_count = 3; break; /* 3 */
+ case 0x0f: bridge_count = 4; break; /* 4 */
+ }
+
+ slot = PCI_SLOT(dev->devfn);
+ while (dev->bus->self) {
+ /* Check for built-in bridges on hose 0. */
+ if (hose->index == 0
+ && (PCI_SLOT(dev->bus->self->devfn)
+ > 20 - bridge_count)) {
+ slot = PCI_SLOT(dev->devfn);
+ break;
+ }
+ /* Must be a card-based bridge. */
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+ return slot;
+}
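+
+/* Illustrative reading of the loop above: with two backplane bridges
+   (0x502 & 0x0f reads 0x03), built-in bridges sit above slot
+   20 - 2 = 18 on hose 0, so a parent bridge at slot 19 or 20 stops
+   the swizzle and the device's own slot is used for the lookup.  */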
+
+/*
+ * The System Vectors
+ */
+
+struct alpha_machine_vector eiger_mv __initmv = {
+ .vector_name = "Eiger",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TSUNAMI_IO,
+ .machine_check = tsunami_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TSUNAMI_DAC_OFFSET,
+
+ .nr_irqs = 128,
+ .device_interrupt = eiger_device_interrupt,
+
+ .init_arch = tsunami_init_arch,
+ .init_irq = eiger_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = tsunami_kill_arch,
+ .pci_map_irq = eiger_map_irq,
+ .pci_swizzle = eiger_swizzle,
+};
+ALIAS_MV(eiger)
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
new file mode 100644
index 000000000000..fcabb7c96a16
--- /dev/null
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -0,0 +1,274 @@
+/*
+ * linux/arch/alpha/kernel/sys_jensen.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the Jensen.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef __EXTERN_INLINE
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/*
+ * Jensen is special: the vector is 0x8X0 for EISA interrupt X, and
+ * 0x9X0 for the local motherboard interrupts.
+ *
+ * Note especially that those local interrupts CANNOT be masked,
+ * which causes much of the pain below...
+ *
+ * 0x660 - NMI
+ *
+ * 0x800 - IRQ0 interval timer (not used, as we use the RTC timer)
+ * 0x810 - IRQ1 line printer (duh..)
+ * 0x860 - IRQ6 floppy disk
+ *
+ * 0x900 - COM1
+ * 0x920 - COM2
+ * 0x980 - keyboard
+ * 0x990 - mouse
+ *
+ * PCI-based systems are more sane: they don't have the local
+ * interrupts at all, and have only normal PCI interrupts from
+ * devices. Happily it's easy enough to do a sane mapping from the
+ * Jensen.
+ *
+ * Note that this means that we may have to do a hardware
+ * "local_op" to a different interrupt than we report to the rest of the
+ * world.
+ */
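+
+/* Worked example of the decode used below (illustrative): EISA vector
+   0x860 maps to irq (0x860 - 0x800) >> 4 = 6, the floppy; 0x810 would
+   give irq 1, which jensen_device_interrupt() remaps to 7 for the
+   parport.  Local vectors such as 0x900 (COM1 -> irq 4) are
+   special-cased instead.  */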
+
+static unsigned int
+jensen_local_startup(unsigned int irq)
+{
+ /* the parport is really hw IRQ 1, silly Jensen. */
+ if (irq == 7)
+ i8259a_startup_irq(1);
+ else
+ /*
+ * For all true local interrupts, set the flag that prevents
+ * the IPL from being dropped during handler processing.
+ */
+ if (irq_desc[irq].action)
+ irq_desc[irq].action->flags |= SA_INTERRUPT;
+ return 0;
+}
+
+static void
+jensen_local_shutdown(unsigned int irq)
+{
+ /* the parport is really hw IRQ 1, silly Jensen. */
+ if (irq == 7)
+ i8259a_disable_irq(1);
+}
+
+static void
+jensen_local_enable(unsigned int irq)
+{
+ /* the parport is really hw IRQ 1, silly Jensen. */
+ if (irq == 7)
+ i8259a_enable_irq(1);
+}
+
+static void
+jensen_local_disable(unsigned int irq)
+{
+ /* the parport is really hw IRQ 1, silly Jensen. */
+ if (irq == 7)
+ i8259a_disable_irq(1);
+}
+
+static void
+jensen_local_ack(unsigned int irq)
+{
+ /* the parport is really hw IRQ 1, silly Jensen. */
+ if (irq == 7)
+ i8259a_mask_and_ack_irq(1);
+}
+
+static void
+jensen_local_end(unsigned int irq)
+{
+ /* the parport is really hw IRQ 1, silly Jensen. */
+ if (irq == 7)
+ i8259a_end_irq(1);
+}
+
+static struct hw_interrupt_type jensen_local_irq_type = {
+ .typename = "LOCAL",
+ .startup = jensen_local_startup,
+ .shutdown = jensen_local_shutdown,
+ .enable = jensen_local_enable,
+ .disable = jensen_local_disable,
+ .ack = jensen_local_ack,
+ .end = jensen_local_end,
+};
+
+static void
+jensen_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ switch (vector) {
+ case 0x660:
+ printk("Whee.. NMI received. Probable hardware error\n");
+ printk("61=%02x, 461=%02x\n", inb(0x61), inb(0x461));
+ return;
+
+ /* local device interrupts: */
+ case 0x900: irq = 4; break; /* com1 -> irq 4 */
+ case 0x920: irq = 3; break; /* com2 -> irq 3 */
+ case 0x980: irq = 1; break; /* kbd -> irq 1 */
+ case 0x990: irq = 9; break; /* mouse -> irq 9 */
+
+ default:
+ if (vector > 0x900) {
+ printk("Unknown local interrupt %lx\n", vector);
+ return;
+ }
+
+ irq = (vector - 0x800) >> 4;
+ if (irq == 1)
+ irq = 7;
+ break;
+ }
+
+ /* If there is no handler yet... */
+ if (irq_desc[irq].action == NULL) {
+ /* If it is a local interrupt that cannot be masked... */
+ if (vector >= 0x900)
+ {
+ /* Clear keyboard/mouse state */
+ inb(0x64);
+ inb(0x60);
+ /* Reset serial ports */
+ inb(0x3fa);
+ inb(0x2fa);
+ outb(0x0c, 0x3fc);
+ outb(0x0c, 0x2fc);
+ /* Clear NMI */
+ outb(0,0x61);
+ outb(0,0x461);
+ }
+ }
+
+#if 0
+ /* A useful bit of code to find out if an interrupt is going wild. */
+ {
+ static unsigned int last_msg = 0, last_cc = 0;
+ static int last_irq = -1, count = 0;
+ unsigned int cc;
+
+ __asm __volatile("rpcc %0" : "=r"(cc));
+ ++count;
+#define JENSEN_CYCLES_PER_SEC (150000000)
+ if (cc - last_msg > ((JENSEN_CYCLES_PER_SEC) * 3) ||
+ irq != last_irq) {
+ printk(KERN_CRIT " irq %d count %d cc %u @ %lx\n",
+ irq, count, cc-last_cc, regs->pc);
+ count = 0;
+ last_msg = cc;
+ last_irq = irq;
+ }
+ last_cc = cc;
+ }
+#endif
+
+ handle_irq(irq, regs);
+}
+
+static void __init
+jensen_init_irq(void)
+{
+ init_i8259a_irqs();
+
+ irq_desc[1].handler = &jensen_local_irq_type;
+ irq_desc[4].handler = &jensen_local_irq_type;
+ irq_desc[3].handler = &jensen_local_irq_type;
+ irq_desc[7].handler = &jensen_local_irq_type;
+ irq_desc[9].handler = &jensen_local_irq_type;
+
+ common_init_isa_dma();
+}
+
+static void __init
+jensen_init_arch(void)
+{
+ struct pci_controller *hose;
+#ifdef CONFIG_PCI
+ static struct pci_dev fake_isa_bridge = { .dma_mask = 0xffffffffUL, };
+
+ isa_bridge = &fake_isa_bridge;
+#endif
+
+ /* Create a hose so that we can report i/o base addresses to
+ userland. */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = EISA_MEM - IDENT_ADDR;
+ hose->dense_mem_base = 0;
+ hose->sparse_io_base = EISA_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+ __direct_map_base = 0;
+ __direct_map_size = 0xffffffff;
+}
+
+static void
+jensen_machine_check (u64 vector, u64 la, struct pt_regs *regs)
+{
+ printk(KERN_CRIT "Machine check\n");
+}
+
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector jensen_mv __initmv = {
+ .vector_name = "Jensen",
+ DO_EV4_MMU,
+ IO_LITE(JENSEN,jensen),
+ .machine_check = jensen_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .rtc_port = 0x170,
+
+ .nr_irqs = 16,
+ .device_interrupt = jensen_device_interrupt,
+
+ .init_arch = jensen_init_arch,
+ .init_irq = jensen_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = NULL,
+ .kill_arch = NULL,
+};
+ALIAS_MV(jensen)
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
new file mode 100644
index 000000000000..804727853d25
--- /dev/null
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -0,0 +1,499 @@
+/*
+ * linux/arch/alpha/kernel/sys_marvel.c
+ *
+ * Marvel / IO7 support
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_marvel.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "err_impl.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+#if NR_IRQS < MARVEL_NR_IRQS
+# error NR_IRQS < MARVEL_NR_IRQS !!!
+#endif
+
+
+/*
+ * Interrupt handling.
+ */
+static void
+io7_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ unsigned int pid;
+ unsigned int irq;
+
+ /*
+ * Vector is 0x800 + (interrupt)
+ *
+ * where (interrupt) is:
+ *
+ * ...16|15 14|13 4|3 0
+ * -----+-----+--------+---
+ * PE | 0 | irq | 0
+ *
+ * where (irq) is
+ *
+ * 0x0800 - 0x0ff0 : 0x0800 + (LSI id << 4)
+ * 0x1000 - 0x2ff0 : 0x1000 + (MSI_DAT<8:0> << 4)
+ */
+ pid = vector >> 16;
+ irq = ((vector & 0xffff) - 0x800) >> 4;
+
+ irq += 16; /* offset for legacy */
+ irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* not too many bits */
+ irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */
+
+ handle_irq(irq, regs);
+}
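+
+/* Worked example (illustrative): vector 0x20810 decodes to
+   pid = 0x20810 >> 16 = 2 and irq = (0x0810 - 0x800) >> 4 = 1; the
+   legacy offset makes that irq 17, which is then merged with the PE
+   number via MARVEL_IRQ_VEC_PE_SHIFT.  */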
+
+static volatile unsigned long *
+io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
+{
+ volatile unsigned long *ctl;
+ unsigned int pid;
+ struct io7 *io7;
+
+ pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;
+
+ if (!(io7 = marvel_find_io7(pid))) {
+ printk(KERN_ERR
+ "%s for nonexistent io7 -- vec %x, pid %d\n",
+ __FUNCTION__, irq, pid);
+ return NULL;
+ }
+
+ irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* isolate the vector */
+ irq -= 16; /* subtract legacy bias */
+
+ if (irq >= 0x180) {
+ printk(KERN_ERR
+ "%s for invalid irq -- pid %d adjusted irq %x\n",
+ __FUNCTION__, pid, irq);
+ return NULL;
+ }
+
+ ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */
+ if (irq >= 0x80) /* MSI */
+ ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;
+
+ if (pio7) *pio7 = io7;
+ return ctl;
+}
+
+static void
+io7_enable_irq(unsigned int irq)
+{
+ volatile unsigned long *ctl;
+ struct io7 *io7;
+
+ ctl = io7_get_irq_ctl(irq, &io7);
+ if (!ctl || !io7) {
+ printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
+ __FUNCTION__, irq);
+ return;
+ }
+
+ spin_lock(&io7->irq_lock);
+ *ctl |= 1UL << 24;
+ mb();
+ *ctl;
+ spin_unlock(&io7->irq_lock);
+}
+
+static void
+io7_disable_irq(unsigned int irq)
+{
+ volatile unsigned long *ctl;
+ struct io7 *io7;
+
+ ctl = io7_get_irq_ctl(irq, &io7);
+ if (!ctl || !io7) {
+ printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
+ __FUNCTION__, irq);
+ return;
+ }
+
+ spin_lock(&io7->irq_lock);
+ *ctl &= ~(1UL << 24);
+ mb();
+ *ctl;
+ spin_unlock(&io7->irq_lock);
+}
+
+static unsigned int
+io7_startup_irq(unsigned int irq)
+{
+ io7_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+io7_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ io7_enable_irq(irq);
+}
+
+static void
+marvel_irq_noop(unsigned int irq)
+{
+ return;
+}
+
+static unsigned int
+marvel_irq_noop_return(unsigned int irq)
+{
+ return 0;
+}
+
+static struct hw_interrupt_type marvel_legacy_irq_type = {
+ .typename = "LEGACY",
+ .startup = marvel_irq_noop_return,
+ .shutdown = marvel_irq_noop,
+ .enable = marvel_irq_noop,
+ .disable = marvel_irq_noop,
+ .ack = marvel_irq_noop,
+ .end = marvel_irq_noop,
+};
+
+static struct hw_interrupt_type io7_lsi_irq_type = {
+ .typename = "LSI",
+ .startup = io7_startup_irq,
+ .shutdown = io7_disable_irq,
+ .enable = io7_enable_irq,
+ .disable = io7_disable_irq,
+ .ack = io7_disable_irq,
+ .end = io7_end_irq,
+};
+
+static struct hw_interrupt_type io7_msi_irq_type = {
+ .typename = "MSI",
+ .startup = io7_startup_irq,
+ .shutdown = io7_disable_irq,
+ .enable = io7_enable_irq,
+ .disable = io7_disable_irq,
+ .ack = marvel_irq_noop,
+ .end = io7_end_irq,
+};
+
+static void
+io7_redirect_irq(struct io7 *io7,
+ volatile unsigned long *csr,
+ unsigned int where)
+{
+ unsigned long val;
+
+ val = *csr;
+ val &= ~(0x1ffUL << 24); /* clear the target pid */
+ val |= ((unsigned long)where << 24); /* set the new target pid */
+
+ *csr = val;
+ mb();
+ *csr;
+}
+
+static void
+io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where)
+{
+ unsigned long val;
+
+ /*
+ * LSI_CTL has target PID @ 14
+ */
+ val = io7->csrs->PO7_LSI_CTL[which].csr;
+ val &= ~(0x1ffUL << 14); /* clear the target pid */
+ val |= ((unsigned long)where << 14); /* set the new target pid */
+
+ io7->csrs->PO7_LSI_CTL[which].csr = val;
+ mb();
+ io7->csrs->PO7_LSI_CTL[which].csr;
+}
+
+static void
+io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where)
+{
+ unsigned long val;
+
+ /*
+ * MSI_CTL has target PID @ 14
+ */
+ val = io7->csrs->PO7_MSI_CTL[which].csr;
+ val &= ~(0x1ffUL << 14); /* clear the target pid */
+ val |= ((unsigned long)where << 14); /* set the new target pid */
+
+ io7->csrs->PO7_MSI_CTL[which].csr = val;
+ mb();
+ io7->csrs->PO7_MSI_CTL[which].csr;
+}
+
+static void __init
+init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
+{
+ /*
+ * LSI_CTL has target PID @ 14
+ */
+ io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
+ mb();
+ io7->csrs->PO7_LSI_CTL[which].csr;
+}
+
+static void __init
+init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
+{
+ /*
+ * MSI_CTL has target PID @ 14
+ */
+ io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
+ mb();
+ io7->csrs->PO7_MSI_CTL[which].csr;
+}
+
+static void __init
+init_io7_irqs(struct io7 *io7,
+ struct hw_interrupt_type *lsi_ops,
+ struct hw_interrupt_type *msi_ops)
+{
+ long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
+ long i;
+
+ printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
+ io7->pe, base);
+
+ /*
+ * Where should interrupts from this IO7 go?
+ *
+ * They really should be sent to the local CPU to avoid having to
+ * traverse the mesh, but if it's not an SMP kernel, they have to
+ * go to the boot CPU. Send them all to the boot CPU for now;
+ * as each secondary starts, it can redirect its local device
+ * interrupts.
+ */
+ printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid);
+
+ spin_lock(&io7->irq_lock);
+
+ /* set up the error irqs */
+ io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
+ io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
+ io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
+ io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
+ io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);
+
+ /* Set up the lsi irqs. */
+ for (i = 0; i < 128; ++i) {
+ irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[base + i].handler = lsi_ops;
+ }
+
+ /* Disable the implemented irqs in hardware. */
+ for (i = 0; i < 0x60; ++i)
+ init_one_io7_lsi(io7, i, boot_cpuid);
+
+ init_one_io7_lsi(io7, 0x74, boot_cpuid);
+ init_one_io7_lsi(io7, 0x75, boot_cpuid);
+
+
+ /* Set up the msi irqs. */
+ for (i = 128; i < (128 + 512); ++i) {
+ irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[base + i].handler = msi_ops;
+ }
+
+ for (i = 0; i < 16; ++i)
+ init_one_io7_msi(io7, i, boot_cpuid);
+
+ spin_unlock(&io7->irq_lock);
+}
+
+static void __init
+marvel_init_irq(void)
+{
+ int i;
+ struct io7 *io7 = NULL;
+
+ /* Reserve the legacy irqs. */
+ for (i = 0; i < 16; ++i) {
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].handler = &marvel_legacy_irq_type;
+ }
+
+ /* Init the io7 irqs. */
+ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
+ init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type);
+}
+
+static int
+marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_controller *hose = dev->sysdata;
+ struct io7_port *io7_port = hose->sysdata;
+ struct io7 *io7 = io7_port->io7;
+ int msi_loc, msi_data_off;
+ u16 msg_ctl;
+ u16 msg_dat;
+ u8 intline;
+ int irq;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
+ irq = intline;
+
+ msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ msg_ctl = 0;
+ if (msi_loc)
+ pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
+
+ if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
+ msi_data_off = PCI_MSI_DATA_32;
+ if (msg_ctl & PCI_MSI_FLAGS_64BIT)
+ msi_data_off = PCI_MSI_DATA_64;
+ pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);
+
+ irq = msg_dat & 0x1ff; /* we use msg_data<8:0> */
+ irq += 0x80; /* offset for lsi */
+
+#if 1
+ printk("PCI:%d:%d:%d (hose %d) [%s] is using MSI\n",
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn),
+ hose->index,
+ pci_pretty_name (dev));
+ printk(" %d message(s) from 0x%04x\n",
+ 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
+ msg_dat);
+ printk(" reporting on %d IRQ(s) from %d (0x%x)\n",
+ 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
+ (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
+ (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
+#endif
+
+#if 0
+ pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
+ msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
+ irq = intline;
+
+ printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
+#endif
+ }
+
+ irq += 16; /* offset for legacy */
+ irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */
+
+ return irq;
+}
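+
+/* Illustrative sketch of the MSI path above: a device with
+   msg_dat = 0x42 yields irq = (0x42 & 0x1ff) + 0x80 = 0xc2, then
+   + 16 for the legacy bias, finally or'ed with the IO7's PE number.  */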
+
+static void __init
+marvel_init_pci(void)
+{
+ struct io7 *io7;
+
+ marvel_register_error_handlers();
+
+ pci_probe_only = 1;
+ common_init_pci();
+
+#ifdef CONFIG_VGA_HOSE
+ locate_and_init_vga(NULL);
+#endif
+
+ /* Clear any io7 errors. */
+ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
+ io7_clear_errors(io7);
+}
+
+static void
+marvel_init_rtc(void)
+{
+ init_rtc_irq();
+}
+
+static void
+marvel_smp_callin(void)
+{
+ int cpuid = hard_smp_processor_id();
+ struct io7 *io7 = marvel_find_io7(cpuid);
+ unsigned int i;
+
+ if (!io7)
+ return;
+
+ /*
+ * There is a local IO7 - redirect all of its interrupts here.
+ */
+ printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);
+
+ /* Redirect the error IRQS here. */
+ io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
+ io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
+ io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
+ io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
+ io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);
+
+ /* Redirect the implemented LSIs here. */
+ for (i = 0; i < 0x60; ++i)
+ io7_redirect_one_lsi(io7, i, cpuid);
+
+ io7_redirect_one_lsi(io7, 0x74, cpuid);
+ io7_redirect_one_lsi(io7, 0x75, cpuid);
+
+ /* Redirect the MSIs here. */
+ for (i = 0; i < 16; ++i)
+ io7_redirect_one_msi(io7, i, cpuid);
+}
+
+/*
+ * System Vectors
+ */
+struct alpha_machine_vector marvel_ev7_mv __initmv = {
+ .vector_name = "MARVEL/EV7",
+ DO_EV7_MMU,
+ DO_DEFAULT_RTC,
+ DO_MARVEL_IO,
+ .machine_check = marvel_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = IO7_DAC_OFFSET,
+
+ .nr_irqs = MARVEL_NR_IRQS,
+ .device_interrupt = io7_device_interrupt,
+
+ .agp_info = marvel_agp_info,
+
+ .smp_callin = marvel_smp_callin,
+ .init_arch = marvel_init_arch,
+ .init_irq = marvel_init_irq,
+ .init_rtc = marvel_init_rtc,
+ .init_pci = marvel_init_pci,
+ .kill_arch = marvel_kill_arch,
+ .pci_map_irq = marvel_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .pa_to_nid = marvel_pa_to_nid,
+ .cpuid_to_nid = marvel_cpuid_to_nid,
+ .node_mem_start = marvel_node_mem_start,
+ .node_mem_size = marvel_node_mem_size,
+};
+ALIAS_MV(marvel_ev7)
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
new file mode 100644
index 000000000000..61ac56f8eeea
--- /dev/null
+++ b/arch/alpha/kernel/sys_miata.c
@@ -0,0 +1,289 @@
+/*
+ * linux/arch/alpha/kernel/sys_miata.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999, 2000 Richard Henderson
+ *
+ * Code supporting the MIATA (EV56+PYXIS).
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_cia.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+static void
+miata_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ irq = (vector - 0x800) >> 4;
+
+ /*
+ * I really hate to do this, but the MIATA SRM console ignores the
+ * low 8 bits in the interrupt summary register, and reports the
+ * vector 0x80 *lower* than I expected from the bit numbering in
+ * the documentation.
+ * This was done because the low 8 summary bits really aren't used
+ * for reporting any interrupts (the PCI-ISA bridge, bit 7, isn't
+ * used for this purpose, as PIC interrupts are delivered as the
+ * vectors 0x800-0x8f0).
+ * But I really don't want to change the fixup code for allocation
+ * of IRQs, nor the alpha_irq_mask maintenance stuff, both of which
+ * look nice and clean now.
+ * So, here's this grotty hack... :-(
+ */
+ if (irq >= 16)
+ irq = irq + 8;
+
+ handle_irq(irq, regs);
+}
+
+static void __init
+miata_init_irq(void)
+{
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = miata_srm_device_interrupt;
+
+#if 0
+ /* These break on MiataGL so we'll try not to do it at all. */
+ *(vulp)PYXIS_INT_HILO = 0x000000B2UL; mb(); /* ISA/NMI HI */
+ *(vulp)PYXIS_RT_COUNT = 0UL; mb(); /* clear count */
+#endif
+
+ init_i8259a_irqs();
+
+ /* Not interested in the bogus interrupts (3,10), Fan Fault (0),
+ NMI (1), or EIDE (9).
+
+ We also disable the risers (4,5), since we don't know how to
+ route the interrupts behind the bridge. */
+ init_pyxis_irqs(0x63b0000);
+
+ common_init_isa_dma();
+ setup_irq(16+2, &halt_switch_irqaction); /* SRM only? */
+ setup_irq(16+6, &timer_cascade_irqaction);
+}
+
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary @ PYXIS_INT_REQ:
+ * Bit Meaning
+ * 0 Fan Fault
+ * 1 NMI
+ * 2 Halt/Reset switch
+ * 3 none
+ * 4 CID0 (Riser ID)
+ * 5 CID1 (Riser ID)
+ * 6 Interval timer
+ * 7 PCI-ISA Bridge
+ * 8 Ethernet
+ * 9 EIDE (deprecated, ISA 14/15 used)
+ *10 none
+ *11 USB
+ *12 Interrupt Line A from slot 4
+ *13 Interrupt Line B from slot 4
+ *14 Interrupt Line C from slot 4
+ *15 Interrupt Line D from slot 4
+ *16 Interrupt Line A from slot 5
+ *17 Interrupt line B from slot 5
+ *18 Interrupt Line C from slot 5
+ *19 Interrupt Line D from slot 5
+ *20 Interrupt Line A from slot 1
+ *21 Interrupt Line B from slot 1
+ *22 Interrupt Line C from slot 1
+ *23 Interrupt Line D from slot 1
+ *24 Interrupt Line A from slot 2
+ *25 Interrupt Line B from slot 2
+ *26 Interrupt Line C from slot 2
+ *27 Interrupt Line D from slot 2
+ *28 Interrupt Line A from slot 3
+ *29 Interrupt Line B from slot 3
+ *30 Interrupt Line C from slot 3
+ *31 Interrupt Line D from slot 3
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot Device
+ * 3 DC21142 Ethernet
+ * 4 EIDE CMD646
+ * 5 none
+ * 6 USB
+ * 7 PCI-ISA bridge
+ * 8 PCI-PCI Bridge (SBU Riser)
+ * 9 none
+ * 10 none
+ * 11 PCI on board slot 4 (SBU Riser)
+ * 12 PCI on board slot 5 (SBU Riser)
+ *
+ * These are behind the bridge, so I'm not sure what to do...
+ *
+ * 13 PCI on board slot 1 (SBU Riser)
+ * 14 PCI on board slot 2 (SBU Riser)
+ * 15 PCI on board slot 3 (SBU Riser)
+ *
+ *
+ * This two-layered interrupt approach means that we allocate IRQ 16 and
+ * above for PCI interrupts. The IRQ relates to which bit the interrupt
+ * comes in on. This makes interrupt processing much easier.
+ */
+
+static int __init
+miata_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[18][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */
+ { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */
+ { -1, -1, -1, -1, -1}, /* IdSel 16, none */
+ { -1, -1, -1, -1, -1}, /* IdSel 17, none */
+ { -1, -1, -1, -1, -1}, /* IdSel 18, PCI-ISA */
+ { -1, -1, -1, -1, -1}, /* IdSel 19, PCI-PCI */
+ { -1, -1, -1, -1, -1}, /* IdSel 20, none */
+ { -1, -1, -1, -1, -1}, /* IdSel 21, none */
+ {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 22, slot 4 */
+ {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 23, slot 5 */
+ /* the next 7 are actually on PCI bus 1, across the bridge */
+ {16+11, 16+11, 16+11, 16+11, 16+11}, /* IdSel 24, QLISP/GL*/
+ { -1, -1, -1, -1, -1}, /* IdSel 25, none */
+ { -1, -1, -1, -1, -1}, /* IdSel 26, none */
+ { -1, -1, -1, -1, -1}, /* IdSel 27, none */
+ {16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 28, slot 1 */
+ {16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 29, slot 2 */
+ {16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 30, slot 3 */
+ /* This bridge is on the main bus of later revisions of the original MIATA */
+ { -1, -1, -1, -1, -1}, /* IdSel 31, PCI-PCI */
+ };
+ const long min_idsel = 3, max_idsel = 20, irqs_per_slot = 5;
+
+ /* The USB function of the 82c693 has its interrupt connected to
+    the 2nd 8259 controller, so we have to check for it first. */
+
+ if (slot == 7 && PCI_FUNC(dev->devfn) == 3) {
+ u8 irq = 0;
+ struct pci_dev *func0;
+
+ /* Function 0 of the same device holds the routing register. */
+ func0 = pci_find_slot(dev->bus->number, dev->devfn & ~7);
+ if (pci_read_config_byte(func0, 0x40, &irq) != PCIBIOS_SUCCESSFUL)
+ return -1;
+ return irq;
+ }
+
+ return COMMON_TABLE_LOOKUP;
+}
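+
+/* Sketch of the special case above (illustrative): the 82c693 USB
+   function (slot 7, function 3) is routed through the second 8259,
+   so its IRQ is read from config register 0x40 of function 0 of the
+   same device (devfn & ~7) instead of from irq_tab[].  */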
+
+static u8 __init
+miata_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ int slot, pin = *pinp;
+
+ if (dev->bus->number == 0) {
+ slot = PCI_SLOT(dev->devfn);
+ }
+ /* Check for the built-in bridge. */
+ else if ((PCI_SLOT(dev->bus->self->devfn) == 8) ||
+ (PCI_SLOT(dev->bus->self->devfn) == 20)) {
+ slot = PCI_SLOT(dev->devfn) + 9;
+ }
+ else
+ {
+ /* Must be a card-based bridge. */
+ do {
+ if ((PCI_SLOT(dev->bus->self->devfn) == 8) ||
+ (PCI_SLOT(dev->bus->self->devfn) == 20)) {
+ slot = PCI_SLOT(dev->devfn) + 9;
+ break;
+ }
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ /* Slot of the next bridge. */
+ slot = PCI_SLOT(dev->devfn);
+ } while (dev->bus->self);
+ }
+ *pinp = pin;
+ return slot;
+}
+
+static void __init
+miata_init_pci(void)
+{
+ cia_init_pci();
+ SMC669_Init(0); /* it might be a GL (fails harmlessly if not) */
+ es1888_init();
+}
+
+static void
+miata_kill_arch(int mode)
+{
+ cia_kill_arch(mode);
+
+#ifndef ALPHA_RESTORE_SRM_SETUP
+ switch(mode) {
+ case LINUX_REBOOT_CMD_RESTART:
+ /* Who said DEC engineers have no sense of humor? ;-) */
+ if (alpha_using_srm) {
+ *(vuip) PYXIS_RESET = 0x0000dead;
+ mb();
+ }
+ break;
+ case LINUX_REBOOT_CMD_HALT:
+ break;
+ case LINUX_REBOOT_CMD_POWER_OFF:
+ break;
+ }
+
+ halt();
+#endif
+}
+
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector miata_mv __initmv = {
+ .vector_name = "Miata",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_PYXIS_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
+
+ .nr_irqs = 48,
+ .device_interrupt = pyxis_device_interrupt,
+
+ .init_arch = pyxis_init_arch,
+ .init_irq = miata_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = miata_init_pci,
+ .kill_arch = miata_kill_arch,
+ .pci_map_irq = miata_map_irq,
+ .pci_swizzle = miata_swizzle,
+};
+ALIAS_MV(miata)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
new file mode 100644
index 000000000000..d78a0daa6168
--- /dev/null
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -0,0 +1,265 @@
+/*
+ * linux/arch/alpha/kernel/sys_mikasa.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the MIKASA (AlphaServer 1000).
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_apecs.h>
+#include <asm/core_cia.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note mask bit is true for ENABLED irqs. */
+static int cached_irq_mask;
+
+static inline void
+mikasa_update_irq_hw(int mask)
+{
+ outw(mask, 0x536);
+}
+
+static inline void
+mikasa_enable_irq(unsigned int irq)
+{
+ mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+}
+
+static void
+mikasa_disable_irq(unsigned int irq)
+{
+ mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+}
+
+static unsigned int
+mikasa_startup_irq(unsigned int irq)
+{
+ mikasa_enable_irq(irq);
+ return 0;
+}
+
+static void
+mikasa_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ mikasa_enable_irq(irq);
+}
+
+static struct hw_interrupt_type mikasa_irq_type = {
+ .typename = "MIKASA",
+ .startup = mikasa_startup_irq,
+ .shutdown = mikasa_disable_irq,
+ .enable = mikasa_enable_irq,
+ .disable = mikasa_disable_irq,
+ .ack = mikasa_disable_irq,
+ .end = mikasa_end_irq,
+};
+
+static void
+mikasa_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary registers */
+ pld = (((~inw(0x534) & 0x0000ffffUL) << 16)
+ | (((unsigned long) inb(0xa0)) << 8)
+ | inb(0x20));
+
+ /*
+ * Now work through every bit that is set and call the
+ * appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i < 16) {
+ isa_device_interrupt(vector, regs);
+ } else {
+ handle_irq(i, regs);
+ }
+ }
+}
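+
+/* Illustrative breakdown of the summary word built above: bits 0-7
+   come from the master 8259, bits 8-15 from the slave, and bits 16-31
+   are the inverted summary register at 0x534, so a set bit i >= 16 is
+   a pending PCI interrupt dispatched directly as irq i.  */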
+
+static void __init
+mikasa_init_irq(void)
+{
+ long i;
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = srm_device_interrupt;
+
+ mikasa_update_irq_hw(0);
+
+ for (i = 16; i < 32; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &mikasa_irq_type;
+ }
+
+ init_i8259a_irqs();
+ common_init_isa_dma();
+}
+
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary @ 0x536:
+ * Bit Meaning
+ * 0 Interrupt Line A from slot 0
+ * 1 Interrupt Line B from slot 0
+ * 2 Interrupt Line C from slot 0
+ * 3 Interrupt Line D from slot 0
+ * 4 Interrupt Line A from slot 1
+ * 5 Interrupt line B from slot 1
+ * 6 Interrupt Line C from slot 1
+ * 7 Interrupt Line D from slot 1
+ * 8 Interrupt Line A from slot 2
+ * 9 Interrupt Line B from slot 2
+ *10 Interrupt Line C from slot 2
+ *11 Interrupt Line D from slot 2
+ *12 NCR 810 SCSI
+ *13 Power Supply Fail
+ *14 Temperature Warn
+ *15 Reserved
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot Device
+ * 6 NCR SCSI controller
+ * 7 Intel PCI-EISA bridge chip
+ * 11 PCI on board slot 0
+ * 12 PCI on board slot 1
+ * 13 PCI on board slot 2
+ *
+ *
+ * This two-layered interrupt approach means that we allocate IRQ 16 and
+ * above for PCI interrupts. The IRQ relates to which bit the interrupt
+ * comes in on. This makes interrupt processing much easier.
+ */
+
+static int __init
+mikasa_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[8][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */
+ { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
+ { -1, -1, -1, -1, -1}, /* IdSel 19, ???? */
+ { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */
+ { -1, -1, -1, -1, -1}, /* IdSel 21, ???? */
+ { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 0 */
+ { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */
+ { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 24, slot 2 */
+ };
+ const long min_idsel = 6, max_idsel = 13, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
+static void
+mikasa_apecs_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+#define MCHK_NO_DEVSEL 0x205U
+#define MCHK_NO_TABT 0x204U
+
+ struct el_common *mchk_header;
+ unsigned int code;
+
+ mchk_header = (struct el_common *)la_ptr;
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ apecs_pci_clr_err();
+ wrmces(0x7);
+ mb();
+
+ code = mchk_header->code;
+ process_mcheck_info(vector, la_ptr, regs, "MIKASA APECS",
+ (mcheck_expected(0)
+ && (code == MCHK_NO_DEVSEL
+ || code == MCHK_NO_TABT)));
+}
+#endif
+
+
+/*
+ * The System Vector
+ */
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
+struct alpha_machine_vector mikasa_mv __initmv = {
+ .vector_name = "Mikasa",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_APECS_IO,
+ .machine_check = mikasa_apecs_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = mikasa_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = mikasa_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .pci_map_irq = mikasa_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(mikasa)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
+struct alpha_machine_vector mikasa_primo_mv __initmv = {
+ .vector_name = "Mikasa-Primo",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_CIA_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 32,
+ .device_interrupt = mikasa_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = mikasa_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_init_pci,
+ .kill_arch = cia_kill_arch,
+ .pci_map_irq = mikasa_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(mikasa_primo)
+#endif
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
new file mode 100644
index 000000000000..c0d696efec5b
--- /dev/null
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -0,0 +1,269 @@
+/*
+ * linux/arch/alpha/kernel/sys_nautilus.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1998 Richard Henderson
+ * Copyright (C) 1999 Alpha Processor, Inc.,
+ * (David Daniel, Stig Telfer, Soohoon Lee)
+ *
+ * Code supporting NAUTILUS systems.
+ *
+ *
+ * NAUTILUS has the following I/O features:
+ *
+ * a) Driven by AMD 751 aka IRONGATE (northbridge):
+ * 4 PCI slots
+ * 1 AGP slot
+ *
+ * b) Driven by ALI M1543C (southbridge)
+ * 2 ISA slots
+ * 2 IDE connectors
+ * 1 dual drive capable FDD controller
+ * 2 serial ports
+ * 1 ECP/EPP/SP parallel port
+ * 2 USB ports
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/bootmem.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/pgtable.h>
+#include <asm/core_irongate.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "err_impl.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+static void __init
+nautilus_init_irq(void)
+{
+ if (alpha_using_srm) {
+ alpha_mv.device_interrupt = srm_device_interrupt;
+ }
+
+ init_i8259a_irqs();
+ common_init_isa_dma();
+}
+
+static int __init
+nautilus_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ /* Preserve the IRQ set up by the console. */
+
+ u8 irq;
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ return irq;
+}
+
+void
+nautilus_kill_arch(int mode)
+{
+ struct pci_bus *bus = pci_isa_hose->bus;
+ u32 pmuport;
+ int off;
+
+ switch (mode) {
+ case LINUX_REBOOT_CMD_RESTART:
+ if (! alpha_using_srm) {
+ u8 t8;
+ pci_bus_read_config_byte(bus, 0x38, 0x43, &t8);
+ pci_bus_write_config_byte(bus, 0x38, 0x43, t8 | 0x80);
+ outb(1, 0x92);
+ outb(0, 0x92);
+ /* NOTREACHED */
+ }
+ break;
+
+ case LINUX_REBOOT_CMD_POWER_OFF:
+ /* Assume M1543C */
+ off = 0x2000; /* SLP_TYPE = 0, SLP_EN = 1 */
+ pci_bus_read_config_dword(bus, 0x88, 0x10, &pmuport);
+ if (!pmuport) {
+ /* M1535D/D+ */
+ off = 0x3400; /* SLP_TYPE = 5, SLP_EN = 1 */
+ pci_bus_read_config_dword(bus, 0x88, 0xe0, &pmuport);
+ }
+ pmuport &= 0xfffe;
+ outw(0xffff, pmuport); /* Clear pending events. */
+ outw(off, pmuport + 4);
+ /* NOTREACHED */
+ break;
+ }
+}
+
+/* Perform analysis of a machine check that arrived from the system (NMI) */
+
+static void
+naut_sys_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs)
+{
+ printk("PC %lx RA %lx\n", regs->pc, regs->r26);
+ irongate_pci_clr_err();
+}
+
+/* Machine checks can come from two sources - those on the CPU and those
+   in the system. They are analysed separately, but it all starts here. */
+
+void
+nautilus_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs)
+{
+ char *mchk_class;
+
+ /* Now for some analysis. Machine checks fall into two classes --
+ those picked up by the system, and those picked up by the CPU.
+ Add to that the two levels of severity - correctable or not. */
+
+ if (vector == SCB_Q_SYSMCHK
+ && ((IRONGATE0->dramms & 0x300) == 0x300)) {
+ unsigned long nmi_ctl;
+
+ /* Clear ALI NMI */
+ nmi_ctl = inb(0x61);
+ nmi_ctl |= 0x0c;
+ outb(nmi_ctl, 0x61);
+ nmi_ctl &= ~0x0c;
+ outb(nmi_ctl, 0x61);
+
+ /* Write again clears error bits. */
+ IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100;
+ mb();
+ IRONGATE0->stat_cmd;
+
+ /* Write again clears error bits. */
+ IRONGATE0->dramms = IRONGATE0->dramms;
+ mb();
+ IRONGATE0->dramms;
+
+ draina();
+ wrmces(0x7);
+ mb();
+ return;
+ }
+
+ if (vector == SCB_Q_SYSERR)
+ mchk_class = "Correctable";
+ else if (vector == SCB_Q_SYSMCHK)
+ mchk_class = "Fatal";
+ else {
+ ev6_machine_check(vector, la_ptr, regs);
+ return;
+ }
+
+ printk(KERN_CRIT "NAUTILUS Machine check 0x%lx "
+ "[%s System Machine Check (NMI)]\n",
+ vector, mchk_class);
+
+ naut_sys_machine_check(vector, la_ptr, regs);
+
+ /* Tell the PALcode to clear the machine check */
+ draina();
+ wrmces(0x7);
+ mb();
+}
+
+extern void free_reserved_mem(void *, void *);
+
+static struct resource irongate_mem = {
+ .name = "Irongate PCI MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+void __init
+nautilus_init_pci(void)
+{
+ struct pci_controller *hose = hose_head;
+ struct pci_bus *bus;
+ struct pci_dev *irongate;
+ unsigned long bus_align, bus_size, pci_mem;
+ unsigned long memtop = max_low_pfn << PAGE_SHIFT;
+
+ /* Scan our single hose. */
+ bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
+ hose->bus = bus;
+
+ irongate = pci_find_slot(0, 0);
+ bus->self = irongate;
+ bus->resource[1] = &irongate_mem;
+
+ pci_bus_size_bridges(bus);
+
+ /* IO port range. */
+ bus->resource[0]->start = 0;
+ bus->resource[0]->end = 0xffff;
+
+ /* Set up the PCI memory range - the limit is hardwired to 0xffffffff,
+    and the base must be aligned to 16MB. */
+ bus_align = bus->resource[1]->start;
+ bus_size = bus->resource[1]->end + 1 - bus_align;
+ if (bus_align < 0x1000000UL)
+ bus_align = 0x1000000UL;
+
+ pci_mem = (0x100000000UL - bus_size) & -bus_align;
+
+ bus->resource[1]->start = pci_mem;
+ bus->resource[1]->end = 0xffffffffUL;
+ if (request_resource(&iomem_resource, bus->resource[1]) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose 0\n");
+
+ if (pci_mem < memtop)
+ memtop = pci_mem;
+ if (memtop > alpha_mv.min_mem_address) {
+ free_reserved_mem(__va(alpha_mv.min_mem_address),
+ __va(memtop));
+ printk("nautilus_init_pci: %ldk freed\n",
+ (memtop - alpha_mv.min_mem_address) >> 10);
+ }
+
+ if ((IRONGATE0->dev_vendor >> 16) > 0x7006) /* Albacore? */
+ IRONGATE0->pci_mem = pci_mem;
+
+ pci_bus_assign_resources(bus);
+ pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
+}
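+
+/* Worked example (illustrative): with bus_size = 0x3000000 (48MB) and
+   bus_align raised to the 16MB minimum, pci_mem becomes
+   (0x100000000 - 0x3000000) & -0x1000000 = 0xfd000000, so the PCI
+   window sits at the top of the 32-bit space and the RAM below it is
+   released back to the system.  */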
+
+/*
+ * The System Vectors
+ */
+
+struct alpha_machine_vector nautilus_mv __initmv = {
+ .vector_name = "Nautilus",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_IRONGATE_IO,
+ .machine_check = nautilus_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = IRONGATE_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = irongate_init_arch,
+ .init_irq = nautilus_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = nautilus_init_pci,
+ .kill_arch = nautilus_kill_arch,
+ .pci_map_irq = nautilus_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(nautilus)
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
new file mode 100644
index 000000000000..65061f5d7410
--- /dev/null
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -0,0 +1,347 @@
+/*
+ * linux/arch/alpha/kernel/sys_noritake.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the NORITAKE (AlphaServer 1000A),
+ * CORELLE (AlphaServer 800), and ALCOR Primo (AlphaStation 600A).
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_apecs.h>
+#include <asm/core_cia.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+/* Note mask bit is true for ENABLED irqs. */
+static int cached_irq_mask;
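+/* Bit n of the cache covers IRQ 16+n. The hardware mask lives in two
+ 16-bit summary mask registers: 0x54a for IRQs 16-31 and 0x54c for
+ IRQs 32-47. */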
+
+static inline void
+noritake_update_irq_hw(int irq, int mask)
+{
+ int port = 0x54a;
+ if (irq >= 32) {
+ mask >>= 16;
+ port = 0x54c;
+ }
+ outw(mask, port);
+}
+
+static void
+noritake_enable_irq(unsigned int irq)
+{
+ noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+}
+
+static void
+noritake_disable_irq(unsigned int irq)
+{
+ noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+}
+
+static unsigned int
+noritake_startup_irq(unsigned int irq)
+{
+ noritake_enable_irq(irq);
+ return 0;
+}
+
+static struct hw_interrupt_type noritake_irq_type = {
+ .typename = "NORITAKE",
+ .startup = noritake_startup_irq,
+ .shutdown = noritake_disable_irq,
+ .enable = noritake_enable_irq,
+ .disable = noritake_disable_irq,
+ .ack = noritake_disable_irq,
+ .end = noritake_enable_irq,
+};
+
+static void
+noritake_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary registers of NORITAKE */
+ pld = (((unsigned long) inw(0x54c) << 32)
+ | ((unsigned long) inw(0x54a) << 16)
+ | ((unsigned long) inb(0xa0) << 8)
+ | inb(0x20));
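+
+ /* Bits 0-7 come from the master i8259, 8-15 from the slave, 16-31
+ from summary register 1 and 32-47 from summary register 2, so the
+ bit index equals the kernel IRQ number; the ISA bits are
+ dispatched through the PICs below. */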
+
+ /*
+ * Now for every possible bit set, work through them and call
+ * the appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i < 16) {
+ isa_device_interrupt(vector, regs);
+ } else {
+ handle_irq(i, regs);
+ }
+ }
+}
+
+static void
+noritake_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ irq = (vector - 0x800) >> 4;
+
+ /*
+ * I really hate to do this, too, but the NORITAKE SRM console also
+ * reports PCI vectors *lower* than I expected from the bit numbers
+ * in the documentation.
+ * But I really don't want to change the fixup code for allocation
+ * of IRQs, nor the alpha_irq_mask maintenance stuff, both of which
+ * look nice and clean now.
+ * So, here's this additional grotty hack... :-(
+ */
+ if (irq >= 16)
+ irq = irq + 1;
+
+ handle_irq(irq, regs);
+}
+
+static void __init
+noritake_init_irq(void)
+{
+ long i;
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = noritake_srm_device_interrupt;
+
+ outw(0, 0x54a);
+ outw(0, 0x54c);
+
+ for (i = 16; i < 48; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &noritake_irq_type;
+ }
+
+ init_i8259a_irqs();
+ common_init_isa_dma();
+}
+
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary @ 0x542, summary register #1:
+ * Bit Meaning
+ * 0 All valid ints from summary regs 2 & 3
+ * 1 QLOGIC ISP1020A SCSI
+ * 2 Interrupt Line A from slot 0
+ * 3 Interrupt Line B from slot 0
+ * 4 Interrupt Line A from slot 1
+ * 5 Interrupt line B from slot 1
+ * 6 Interrupt Line A from slot 2
+ * 7 Interrupt Line B from slot 2
+ * 8 Interrupt Line A from slot 3
+ * 9 Interrupt Line B from slot 3
+ *10 Interrupt Line A from slot 4
+ *11 Interrupt Line B from slot 4
+ *12 Interrupt Line A from slot 5
+ *13 Interrupt Line B from slot 5
+ *14 Interrupt Line A from slot 6
+ *15 Interrupt Line B from slot 6
+ *
+ * Summary @ 0x544, summary register #2:
+ * Bit Meaning
+ * 0 OR of all unmasked ints in SR #2
+ * 1 OR of secondary bus ints
+ * 2 Interrupt Line C from slot 0
+ * 3 Interrupt Line D from slot 0
+ * 4 Interrupt Line C from slot 1
+ * 5 Interrupt line D from slot 1
+ * 6 Interrupt Line C from slot 2
+ * 7 Interrupt Line D from slot 2
+ * 8 Interrupt Line C from slot 3
+ * 9 Interrupt Line D from slot 3
+ *10 Interrupt Line C from slot 4
+ *11 Interrupt Line D from slot 4
+ *12 Interrupt Line C from slot 5
+ *13 Interrupt Line D from slot 5
+ *14 Interrupt Line C from slot 6
+ *15 Interrupt Line D from slot 6
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot Device
+ * 7 Intel PCI-EISA bridge chip
+ * 8 DEC PCI-PCI bridge chip
+ * 11 PCI on board slot 0
+ * 12 PCI on board slot 1
+ * 13 PCI on board slot 2
+ *
+ *
+ * This two-layered interrupt approach means that we allocate IRQ 16 and
+ * above for PCI interrupts. The IRQ relates to which bit the interrupt
+ * comes in on. This makes interrupt processing much easier.
+ */
+
+static int __init
+noritake_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[15][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ /* note: IDSELs 16, 17, and 25 are CORELLE only */
+ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
+ { -1, -1, -1, -1, -1}, /* IdSel 17, S3 Trio64 */
+ { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
+ { -1, -1, -1, -1, -1}, /* IdSel 19, PPB */
+ { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */
+ { -1, -1, -1, -1, -1}, /* IdSel 21, ???? */
+ { 16+2, 16+2, 16+3, 32+2, 32+3}, /* IdSel 22, slot 0 */
+ { 16+4, 16+4, 16+5, 32+4, 32+5}, /* IdSel 23, slot 1 */
+ { 16+6, 16+6, 16+7, 32+6, 32+7}, /* IdSel 24, slot 2 */
+ { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 25, slot 3 */
+ /* The following 5 are actually on PCI bus 1, which is
+ across the built-in bridge of the NORITAKE only. */
+ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
+ { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 17, slot 3 */
+ {16+10, 16+10, 16+11, 32+10, 32+11}, /* IdSel 18, slot 4 */
+ {16+12, 16+12, 16+13, 32+12, 32+13}, /* IdSel 19, slot 5 */
+ {16+14, 16+14, 16+15, 32+14, 32+15}, /* IdSel 20, slot 6 */
+ };
+ const long min_idsel = 5, max_idsel = 19, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static u8 __init
+noritake_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ int slot, pin = *pinp;
+
+ if (dev->bus->number == 0) {
+ slot = PCI_SLOT(dev->devfn);
+ }
+ /* Check for the built-in bridge */
+ else if (PCI_SLOT(dev->bus->self->devfn) == 8) {
+ slot = PCI_SLOT(dev->devfn) + 15; /* WAG! */
+ }
+ else
+ {
+ /* Must be a card-based bridge. */
+ do {
+ if (PCI_SLOT(dev->bus->self->devfn) == 8) {
+ slot = PCI_SLOT(dev->devfn) + 15;
+ break;
+ }
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
+
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ /* Slot of the next bridge. */
+ slot = PCI_SLOT(dev->devfn);
+ } while (dev->bus->self);
+ }
+ *pinp = pin;
+ return slot;
+}
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
+static void
+noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+#define MCHK_NO_DEVSEL 0x205U
+#define MCHK_NO_TABT 0x204U
+
+ struct el_common *mchk_header;
+ unsigned int code;
+
+ mchk_header = (struct el_common *)la_ptr;
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ apecs_pci_clr_err();
+ wrmces(0x7);
+ mb();
+
+ code = mchk_header->code;
+ process_mcheck_info(vector, la_ptr, regs, "NORITAKE APECS",
+ (mcheck_expected(0)
+ && (code == MCHK_NO_DEVSEL
+ || code == MCHK_NO_TABT)));
+}
+#endif
+
+
+/*
+ * The System Vectors
+ */
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
+struct alpha_machine_vector noritake_mv __initmv = {
+ .vector_name = "Noritake",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_APECS_IO,
+ .machine_check = noritake_apecs_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = noritake_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = noritake_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .pci_map_irq = noritake_map_irq,
+ .pci_swizzle = noritake_swizzle,
+};
+ALIAS_MV(noritake)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
+struct alpha_machine_vector noritake_primo_mv __initmv = {
+ .vector_name = "Noritake-Primo",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_CIA_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 48,
+ .device_interrupt = noritake_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = noritake_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = cia_init_pci,
+ .kill_arch = cia_kill_arch,
+ .pci_map_irq = noritake_map_irq,
+ .pci_swizzle = noritake_swizzle,
+};
+ALIAS_MV(noritake_primo)
+#endif
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
new file mode 100644
index 000000000000..05888a02a604
--- /dev/null
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -0,0 +1,270 @@
+/*
+ * linux/arch/alpha/kernel/sys_rawhide.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the RAWHIDE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_mcpcia.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/*
+ * HACK ALERT! only the boot cpu is used for interrupts.
+ */
+
+
+/* Note mask bit is true for ENABLED irqs. */
+
+static unsigned int hose_irq_masks[4] = {
+ 0xff0000, 0xfe0000, 0xff0000, 0xff0000
+};
+static unsigned int cached_irq_masks[4];
+DEFINE_SPINLOCK(rawhide_irq_lock);
+
+static inline void
+rawhide_update_irq_hw(int hose, int mask)
+{
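+ /* Write the hose's interrupt mask, then read it back: on Alpha the
+ mb() plus dummy read forces the posted write out to the MCPCIA
+ before the caller proceeds. */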
+ *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)) = mask;
+ mb();
+ *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose));
+}
+
+static inline void
+rawhide_enable_irq(unsigned int irq)
+{
+ unsigned int mask, hose;
+
+ irq -= 16;
+ hose = irq / 24;
+ irq -= hose * 24;
+ mask = 1 << irq;
+
+ spin_lock(&rawhide_irq_lock);
+ mask |= cached_irq_masks[hose];
+ cached_irq_masks[hose] = mask;
+ rawhide_update_irq_hw(hose, mask);
+ spin_unlock(&rawhide_irq_lock);
+}
+
+static void
+rawhide_disable_irq(unsigned int irq)
+{
+ unsigned int mask, hose;
+
+ irq -= 16;
+ hose = irq / 24;
+ irq -= hose * 24;
+ mask = ~(1 << irq) | hose_irq_masks[hose];
+
+ spin_lock(&rawhide_irq_lock);
+ mask &= cached_irq_masks[hose];
+ cached_irq_masks[hose] = mask;
+ rawhide_update_irq_hw(hose, mask);
+ spin_unlock(&rawhide_irq_lock);
+}
+
+static void
+rawhide_mask_and_ack_irq(unsigned int irq)
+{
+ unsigned int mask, mask1, hose;
+
+ irq -= 16;
+ hose = irq / 24;
+ irq -= hose * 24;
+ mask1 = 1 << irq;
+ mask = ~mask1 | hose_irq_masks[hose];
+
+ spin_lock(&rawhide_irq_lock);
+
+ mask &= cached_irq_masks[hose];
+ cached_irq_masks[hose] = mask;
+ rawhide_update_irq_hw(hose, mask);
+
+ /* Clear the interrupt. */
+ *(vuip)MCPCIA_INT_REQ(MCPCIA_HOSE2MID(hose)) = mask1;
+
+ spin_unlock(&rawhide_irq_lock);
+}
+
+static unsigned int
+rawhide_startup_irq(unsigned int irq)
+{
+ rawhide_enable_irq(irq);
+ return 0;
+}
+
+static void
+rawhide_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ rawhide_enable_irq(irq);
+}
+
+static struct hw_interrupt_type rawhide_irq_type = {
+ .typename = "RAWHIDE",
+ .startup = rawhide_startup_irq,
+ .shutdown = rawhide_disable_irq,
+ .enable = rawhide_enable_irq,
+ .disable = rawhide_disable_irq,
+ .ack = rawhide_mask_and_ack_irq,
+ .end = rawhide_end_irq,
+};
+
+static void
+rawhide_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ irq = (vector - 0x800) >> 4;
+
+ /*
+ * The RAWHIDE SRM console reports PCI interrupts with a vector
+ * 0x80 *higher* than one might expect, as PCI IRQ 0 (ie bit 0)
+ * shows up as IRQ 24, etc, etc. We adjust it down by 8 to have
+ * it line up with the actual bit numbers from the REQ registers,
+ * which is how we manage the interrupts/mask. Sigh...
+ *
+ * Also, PCI #1 interrupts are offset some more... :-(
+ */
+
+ if (irq == 52) {
+ /* SCSI on PCI1 is special. */
+ irq = 72;
+ }
+
+ /* Adjust by which hose it is from. */
+ irq -= ((irq + 16) >> 2) & 0x38;
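+
+ /* SRM numbers REQ bit b on hose h as 24 + 32*h + b; the expression
+ above subtracts 8 + 8*h, giving the kernel's 16 + 24*h + b.
+ E.g. hose 1, bit 0: SRM irq 56 becomes 56 - 16 = 40 = 16 + 24. */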
+
+ handle_irq(irq, regs);
+}
+
+static void __init
+rawhide_init_irq(void)
+{
+ struct pci_controller *hose;
+ long i;
+
+ mcpcia_init_hoses();
+
+ for (hose = hose_head; hose; hose = hose->next) {
+ unsigned int h = hose->index;
+ unsigned int mask = hose_irq_masks[h];
+
+ cached_irq_masks[h] = mask;
+ *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(h)) = mask;
+ *(vuip)MCPCIA_INT_MASK1(MCPCIA_HOSE2MID(h)) = 0;
+ }
+
+ for (i = 16; i < 128; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &rawhide_irq_type;
+ }
+
+ init_i8259a_irqs();
+ common_init_isa_dma();
+}
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary @ MCPCIA_PCI0_INT_REQ:
+ * Bit Meaning
+ * 0 Interrupt Line A from slot 2 PCI0
+ * 1 Interrupt Line B from slot 2 PCI0
+ * 2 Interrupt Line C from slot 2 PCI0
+ * 3 Interrupt Line D from slot 2 PCI0
+ * 4 Interrupt Line A from slot 3 PCI0
+ * 5 Interrupt Line B from slot 3 PCI0
+ * 6 Interrupt Line C from slot 3 PCI0
+ * 7 Interrupt Line D from slot 3 PCI0
+ * 8 Interrupt Line A from slot 4 PCI0
+ * 9 Interrupt Line B from slot 4 PCI0
+ * 10 Interrupt Line C from slot 4 PCI0
+ * 11 Interrupt Line D from slot 4 PCI0
+ * 12 Interrupt Line A from slot 5 PCI0
+ * 13 Interrupt Line B from slot 5 PCI0
+ * 14 Interrupt Line C from slot 5 PCI0
+ * 15 Interrupt Line D from slot 5 PCI0
+ * 16 EISA interrupt (PCI 0) or SCSI interrupt (PCI 1)
+ * 17-23 NA
+ *
+ * IdSel
+ * 1 EISA bridge (PCI bus 0 only)
+ * 2 PCI option slot 2
+ * 3 PCI option slot 3
+ * 4 PCI option slot 4
+ * 5 PCI option slot 5
+ *
+ */
+
+static int __init
+rawhide_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[5][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
+ { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
+ { 16+ 4, 16+ 4, 16+ 5, 16+ 6, 16+ 7}, /* IdSel 3 slot 3 */
+ { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 4 slot 4 */
+ { 16+12, 16+12, 16+13, 16+14, 16+15} /* IdSel 5 slot 5 */
+ };
+ const long min_idsel = 1, max_idsel = 5, irqs_per_slot = 5;
+
+ struct pci_controller *hose = dev->sysdata;
+ int irq = COMMON_TABLE_LOOKUP;
+ if (irq >= 0)
+ irq += 24 * hose->index;
+ return irq;
+}
+
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector rawhide_mv __initmv = {
+ .vector_name = "Rawhide",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_MCPCIA_IO,
+ .machine_check = mcpcia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = MCPCIA_DEFAULT_MEM_BASE,
+ .pci_dac_offset = MCPCIA_DAC_OFFSET,
+
+ .nr_irqs = 128,
+ .device_interrupt = rawhide_srm_device_interrupt,
+
+ .init_arch = mcpcia_init_arch,
+ .init_irq = rawhide_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = rawhide_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(rawhide)
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
new file mode 100644
index 000000000000..78c30decf3ff
--- /dev/null
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -0,0 +1,240 @@
+/*
+ * linux/arch/alpha/kernel/sys_ruffian.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999, 2000 Richard Henderson
+ *
+ * Code supporting the RUFFIAN.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_cia.h>
+#include <asm/tlbflush.h>
+#include <asm/8253pit.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+static void __init
+ruffian_init_irq(void)
+{
+ /* Invert 6&7 for i82371 */
+ *(vulp)PYXIS_INT_HILO = 0x000000c0UL; mb();
+ *(vulp)PYXIS_INT_CNFG = 0x00002064UL; mb(); /* all clear */
+
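+ /* Full re-init of both 8259s: ICW1 0x11 (edge-triggered, cascade,
+ ICW4 follows), ICW2 vector base (8 for the slave, 0 for the
+ master), ICW3 cascade wiring (slave ID 2 / slave on IR2), ICW4
+ 0x01 (8086 mode), then OCW1 0xff to mask every line. */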
+ outb(0x11,0xA0);
+ outb(0x08,0xA1);
+ outb(0x02,0xA1);
+ outb(0x01,0xA1);
+ outb(0xFF,0xA1);
+
+ outb(0x11,0x20);
+ outb(0x00,0x21);
+ outb(0x04,0x21);
+ outb(0x01,0x21);
+ outb(0xFF,0x21);
+
+ /* Finish writing the 82C59A PIC Operation Control Words */
+ outb(0x20,0xA0);
+ outb(0x20,0x20);
+
+ init_i8259a_irqs();
+
+ /* Not interested in the bogus interrupts (0,3,6),
+ NMI (1), HALT (2), flash (5), or 21142 (8). */
+ init_pyxis_irqs(0x16f0000);
+
+ common_init_isa_dma();
+}
+
+#define RUFFIAN_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
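+/* The latch divides the PIT input clock by HZ, rounding to the nearest
+ count; assuming the usual Alpha values (PIT_TICK_RATE 1193180, HZ
+ 1024), this comes to (1193180 + 512) / 1024 = 1165. */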
+
+static void __init
+ruffian_init_rtc(void)
+{
+ /* Ruffian does not have the RTC connected to the CPU timer
+ interrupt. Instead, it uses the PIT connected to IRQ 0. */
+
+ /* Setup interval timer. */
+ outb(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
+ outb(RUFFIAN_LATCH & 0xff, 0x40); /* LSB */
+ outb(RUFFIAN_LATCH >> 8, 0x40); /* MSB */
+
+ outb(0xb6, 0x43); /* pit counter 2: speaker */
+ outb(0x31, 0x42);
+ outb(0x13, 0x42);
+
+ setup_irq(0, &timer_irqaction);
+}
+
+static void
+ruffian_kill_arch (int mode)
+{
+ cia_kill_arch(mode);
+#if 0
+ /* This only causes re-entry to ARCSBIOS */
+ /* Perhaps this works for other PYXIS as well? */
+ *(vuip) PYXIS_RESET = 0x0000dead;
+ mb();
+#endif
+}
+
+/*
+ * Interrupt routing:
+ *
+ * Primary bus
+ * IdSel INTA INTB INTC INTD
+ * 21052 13 - - - -
+ * SIO 14 23 - - -
+ * 21143 15 44 - - -
+ * Slot 0 17 43 42 41 40
+ *
+ * Secondary bus
+ * IdSel INTA INTB INTC INTD
+ * Slot 0 8 (18) 19 18 17 16
+ * Slot 1 9 (19) 31 30 29 28
+ * Slot 2 10 (20) 27 26 25 24
+ * Slot 3 11 (21) 39 38 37 36
+ * Slot 4 12 (22) 35 34 33 32
+ * 53c875 13 (23) 20 - - -
+ *
+ */
+
+static int __init
+ruffian_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[11][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
+ {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
+ {44, 44, 44, 44, 44}, /* IdSel 15, 21143 */
+ {-1, -1, -1, -1, -1}, /* IdSel 16, none */
+ {43, 43, 42, 41, 40}, /* IdSel 17, 64-bit slot */
+ /* the next 6 are actually on PCI bus 1, across the bridge */
+ {19, 19, 18, 17, 16}, /* IdSel 8, slot 0 */
+ {31, 31, 30, 29, 28}, /* IdSel 9, slot 1 */
+ {27, 27, 26, 25, 24}, /* IdSel 10, slot 2 */
+ {39, 39, 38, 37, 36}, /* IdSel 11, slot 3 */
+ {35, 35, 34, 33, 32}, /* IdSel 12, slot 4 */
+ {20, 20, 20, 20, 20}, /* IdSel 13, 53c875 */
+ };
+ const long min_idsel = 13, max_idsel = 23, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static u8 __init
+ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ int slot, pin = *pinp;
+
+ if (dev->bus->number == 0) {
+ slot = PCI_SLOT(dev->devfn);
+ }
+ /* Check for the built-in bridge. */
+ else if (PCI_SLOT(dev->bus->self->devfn) == 13) {
+ slot = PCI_SLOT(dev->devfn) + 10;
+ }
+ else
+ {
+ /* Must be a card-based bridge. */
+ do {
+ if (PCI_SLOT(dev->bus->self->devfn) == 13) {
+ slot = PCI_SLOT(dev->devfn) + 10;
+ break;
+ }
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ /* Slot of the next bridge. */
+ slot = PCI_SLOT(dev->devfn);
+ } while (dev->bus->self);
+ }
+ *pinp = pin;
+ return slot;
+}
+
+#ifdef BUILDING_FOR_MILO
+/*
+ * The DeskStation Ruffian motherboard firmware does not place
+ * the memory size in the PALimpure area. Therefore, we use
+ * the Bank Configuration Registers in PYXIS to obtain the size.
+ */
+static unsigned long __init
+ruffian_get_bank_size(unsigned long offset)
+{
+ unsigned long bank_addr, bank, ret = 0;
+
+ /* Valid offsets are: 0x800, 0x840 and 0x880
+ since Ruffian only uses three banks. */
+ bank_addr = (unsigned long)PYXIS_MCR + offset;
+ bank = *(vulp)bank_addr;
+
+ /* Check BANK_ENABLE */
+ if (bank & 0x01) {
+ static unsigned long size[] __initdata = {
+ 0x40000000UL, /* 0x00, 1G */
+ 0x20000000UL, /* 0x02, 512M */
+ 0x10000000UL, /* 0x04, 256M */
+ 0x08000000UL, /* 0x06, 128M */
+ 0x04000000UL, /* 0x08, 64M */
+ 0x02000000UL, /* 0x0a, 32M */
+ 0x01000000UL, /* 0x0c, 16M */
+ 0x00800000UL, /* 0x0e, 8M */
+ 0x80000000UL, /* 0x10, 2G */
+ };
+
+ bank = (bank & 0x1e) >> 1;
+ if (bank < sizeof(size)/sizeof(*size))
+ ret = size[bank];
+ }
+
+ return ret;
+}
+#endif /* BUILDING_FOR_MILO */
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector ruffian_mv __initmv = {
+ .vector_name = "Ruffian",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_PYXIS_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
+
+ .nr_irqs = 48,
+ .device_interrupt = pyxis_device_interrupt,
+
+ .init_arch = pyxis_init_arch,
+ .init_irq = ruffian_init_irq,
+ .init_rtc = ruffian_init_rtc,
+ .init_pci = cia_init_pci,
+ .kill_arch = ruffian_kill_arch,
+ .pci_map_irq = ruffian_map_irq,
+ .pci_swizzle = ruffian_swizzle,
+};
+ALIAS_MV(ruffian)
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
new file mode 100644
index 000000000000..58404243057b
--- /dev/null
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -0,0 +1,220 @@
+/*
+ * linux/arch/alpha/kernel/sys_rx164.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the RX164 (PCA56+POLARIS).
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_polaris.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note mask bit is true for ENABLED irqs. */
+static unsigned long cached_irq_mask;
+
+static inline void
+rx164_update_irq_hw(unsigned long mask)
+{
+ volatile unsigned int *irq_mask;
+
+ irq_mask = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x74);
+ *irq_mask = mask;
+ mb();
+ *irq_mask;
+}
+
+static inline void
+rx164_enable_irq(unsigned int irq)
+{
+ rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+}
+
+static void
+rx164_disable_irq(unsigned int irq)
+{
+ rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+}
+
+static unsigned int
+rx164_startup_irq(unsigned int irq)
+{
+ rx164_enable_irq(irq);
+ return 0;
+}
+
+static void
+rx164_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ rx164_enable_irq(irq);
+}
+
+static struct hw_interrupt_type rx164_irq_type = {
+ .typename = "RX164",
+ .startup = rx164_startup_irq,
+ .shutdown = rx164_disable_irq,
+ .enable = rx164_enable_irq,
+ .disable = rx164_disable_irq,
+ .ack = rx164_disable_irq,
+ .end = rx164_end_irq,
+};
+
+static void
+rx164_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pld;
+ volatile unsigned int *dirr;
+ long i;
+
+ /* Read the interrupt summary register. On Polaris, this is
+ the DIRR register in PCI config space (offset 0x84). */
+ dirr = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x84);
+ pld = *dirr;
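+
+ /* Each set bit n in the DIRR is kernel IRQ 16+n, except bit 20,
+ which is the PCI-ISA bridge and is dispatched through the i8259s
+ (see the isa_cascade hookup in rx164_init_irq). */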
+
+ /*
+ * Now for every possible bit set, work through them and call
+ * the appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i == 20) {
+ isa_no_iack_sc_device_interrupt(vector, regs);
+ } else {
+ handle_irq(16+i, regs);
+ }
+ }
+}
+
+static void __init
+rx164_init_irq(void)
+{
+ long i;
+
+ rx164_update_irq_hw(0);
+ for (i = 16; i < 40; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &rx164_irq_type;
+ }
+
+ init_i8259a_irqs();
+ common_init_isa_dma();
+
+ setup_irq(16+20, &isa_cascade_irqaction);
+}
+
+
+/*
+ * The RX164 changed its interrupt routing between pass1 and pass2...
+ *
+ * PASS1:
+ *
+ * Slot IDSEL INTA INTB INTC INTD
+ * 0 6 5 10 15 20
+ * 1 7 4 9 14 19
+ * 2 5 3 8 13 18
+ * 3 9 2 7 12 17
+ * 4 10 1 6 11 16
+ *
+ * PASS2:
+ * Slot IDSEL INTA INTB INTC INTD
+ * 0 5 1 7 12 17
+ * 1 6 2 8 13 18
+ * 2 8 3 9 14 19
+ * 3 9 4 10 15 20
+ * 4 10 5 11 16 6
+ *
+ */
+
+/*
+ * IdSel
+ * 5 32 bit PCI option slot 0
+ * 6 64 bit PCI option slot 1
+ * 7 PCI-ISA bridge
+ * 8 64 bit PCI option slot 2
+ * 9 32 bit PCI option slot 3
+ * 10 PCI-PCI bridge
+ *
+ */
+
+static int __init
+rx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+#if 0
+ static char irq_tab_pass1[6][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+3, 16+3, 16+8, 16+13, 16+18}, /* IdSel 5, slot 2 */
+ { 16+5, 16+5, 16+10, 16+15, 16+20}, /* IdSel 6, slot 0 */
+ { 16+4, 16+4, 16+9, 16+14, 16+19}, /* IdSel 7, slot 1 */
+ { -1, -1, -1, -1, -1}, /* IdSel 8, PCI/ISA bridge */
+ { 16+2, 16+2, 16+7, 16+12, 16+17}, /* IdSel 9, slot 3 */
+ { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
+ };
+#else
+ static char irq_tab[6][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
+ { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */
+ { -1, -1, -1, -1, -1}, /* IdSel 7, PCI/ISA bridge */
+ { 16+2, 16+2, 16+8, 16+13, 16+18}, /* IdSel 8, slot 2 */
+ { 16+3, 16+3, 16+9, 16+14, 16+19}, /* IdSel 9, slot 3 */
+ { 16+4, 16+4, 16+10, 16+15, 16+5}, /* IdSel 10, PCI-PCI */
+ };
+#endif
+ const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;
+
+ /* JRP - Need to figure out how to distinguish pass1 from pass2,
+ and use the correct table. */
+ return COMMON_TABLE_LOOKUP;
+}
+
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector rx164_mv __initmv = {
+ .vector_name = "RX164",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_POLARIS_IO,
+ .machine_check = polaris_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+
+ .nr_irqs = 40,
+ .device_interrupt = rx164_device_interrupt,
+
+ .init_arch = polaris_init_arch,
+ .init_irq = rx164_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = NULL,
+ .pci_map_irq = rx164_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(rx164)
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
new file mode 100644
index 000000000000..a7ff84474ace
--- /dev/null
+++ b/arch/alpha/kernel/sys_sable.c
@@ -0,0 +1,653 @@
+/*
+ * linux/arch/alpha/kernel/sys_sable.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the Sable, Sable-Gamma, and Lynx systems.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_t2.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+DEFINE_SPINLOCK(sable_lynx_irq_lock);
+
+typedef struct irq_swizzle_struct
+{
+ char irq_to_mask[64];
+ char mask_to_irq[64];
+
+ /* Note mask bit is true for DISABLED irqs. */
+ unsigned long shadow_mask;
+
+ void (*update_irq_hw)(unsigned long bit, unsigned long mask);
+ void (*ack_irq_hw)(unsigned long bit);
+
+} irq_swizzle_t;
+
+static irq_swizzle_t *sable_lynx_irq_swizzle;
+
+static void sable_lynx_init_irq(int nr_irqs);
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
+
+/***********************************************************************/
+/*
+ * For SABLE, which is really baroque, we manage 40 IRQs, but the
+ * hardware really only supports 24 - not via a normal ISA PIC, but
+ * via cascaded custom 8259s, etc.
+ * 0-7 (char at 536)
+ * 8-15 (char at 53a)
+ * 16-23 (char at 53c)
+ *
+ * Summary Registers (536/53a/53c):
+ *
+ * Bit Meaning Kernel IRQ
+ *------------------------------------------
+ * 0 PCI slot 0 34
+ * 1 NCR810 (builtin) 33
+ * 2 TULIP (builtin) 32
+ * 3 mouse 12
+ * 4 PCI slot 1 35
+ * 5 PCI slot 2 36
+ * 6 keyboard 1
+ * 7 floppy 6
+ * 8 COM2 3
+ * 9 parallel port 7
+ *10 EISA irq 3 -
+ *11 EISA irq 4 -
+ *12 EISA irq 5 5
+ *13 EISA irq 6 -
+ *14 EISA irq 7 -
+ *15 COM1 4
+ *16 EISA irq 9 9
+ *17 EISA irq 10 10
+ *18 EISA irq 11 11
+ *19 EISA irq 12 -
+ *20 EISA irq 13 -
+ *21 EISA irq 14 14
+ *22 NC 15
+ *23 IIC -
+ */
+
+static void
+sable_update_irq_hw(unsigned long bit, unsigned long mask)
+{
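+ /* The mask registers sit one port above each summary register:
+ 0x537 masks IRQs 0-7, 0x53b IRQs 8-15, 0x53d IRQs 16-23; a set
+ bit disables the line, matching the shadow_mask convention. */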
+ int port = 0x537;
+
+ if (bit >= 16) {
+ port = 0x53d;
+ mask >>= 16;
+ } else if (bit >= 8) {
+ port = 0x53b;
+ mask >>= 8;
+ }
+
+ outb(mask, port);
+}
+
+static void
+sable_ack_irq_hw(unsigned long bit)
+{
+ int port, val1, val2;
+
+ if (bit >= 16) {
+ port = 0x53c;
+ val1 = 0xE0 | (bit - 16);
+ val2 = 0xE0 | 4;
+ } else if (bit >= 8) {
+ port = 0x53a;
+ val1 = 0xE0 | (bit - 8);
+ val2 = 0xE0 | 3;
+ } else {
+ port = 0x536;
+ val1 = 0xE0 | (bit - 0);
+ val2 = 0xE0 | 1;
+ }
+
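+ /* 0xE0 | level is the i8259 OCW2 "rotate on specific EOI" command:
+ first acknowledge the level on the slave, then the corresponding
+ cascade input on the master at 0x534. */
+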
+ outb(val1, port); /* ack the slave */
+ outb(val2, 0x534); /* ack the master */
+}
+
+static irq_swizzle_t sable_irq_swizzle = {
+ {
+ -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
+ -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */
+ 2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* */
+ -1, -1, -1, -1, -1, -1, -1, -1 /* */
+ },
+ {
+ 34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */
+ 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
+ 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* */
+ -1, -1, -1, -1, -1, -1, -1, -1 /* */
+ },
+ -1,
+ sable_update_irq_hw,
+ sable_ack_irq_hw
+};
+
+static void __init
+sable_init_irq(void)
+{
+ outb(-1, 0x537); /* slave 0 */
+ outb(-1, 0x53b); /* slave 1 */
+ outb(-1, 0x53d); /* slave 2 */
+ outb(0x44, 0x535); /* enable cascades in master */
+
+ sable_lynx_irq_swizzle = &sable_irq_swizzle;
+ sable_lynx_init_irq(40);
+}
+
+/*
+ * PCI Fixup configuration for ALPHA SABLE (2100).
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot Device
+ * 0 TULIP
+ * 1 SCSI
+ * 2 PCI-EISA bridge
+ * 3 none
+ * 4 none
+ * 5 none
+ * 6 PCI on board slot 0
+ * 7 PCI on board slot 1
+ * 8 PCI on board slot 2
+ *
+ *
+ * This two-layered interrupt approach means that we allocate IRQ 16 and
+ * above for PCI interrupts. The IRQ relates to which bit the interrupt
+ * comes in on. This makes interrupt processing much easier.
+ */
+/*
+ * NOTE: the IRQ assignments below are arbitrary, but need to be consistent
+ * with the values in the irq swizzling tables above.
+ */
+
+static int __init
+sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[9][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
+ { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
+ { -1, -1, -1, -1, -1}, /* IdSel 2, SIO */
+ { -1, -1, -1, -1, -1}, /* IdSel 3, none */
+ { -1, -1, -1, -1, -1}, /* IdSel 4, none */
+ { -1, -1, -1, -1, -1}, /* IdSel 5, none */
+ { 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */
+ { 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */
+ { 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */
+ };
+ long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
+
+/***********************************************************************/
+/* LYNX hardware specifics
+ */
+/*
+ * For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC.
+ *
+ * Bit Meaning Kernel IRQ
+ *------------------------------------------
+ * 0
+ * 1
+ * 2
+ * 3 mouse 12
+ * 4
+ * 5
+ * 6 keyboard 1
+ * 7 floppy 6
+ * 8 COM2 3
+ * 9 parallel port 7
+ *10 EISA irq 3 -
+ *11 EISA irq 4 -
+ *12 EISA irq 5 5
+ *13 EISA irq 6 -
+ *14 EISA irq 7 -
+ *15 COM1 4
+ *16 EISA irq 9 9
+ *17 EISA irq 10 10
+ *18 EISA irq 11 11
+ *19 EISA irq 12 -
+ *20
+ *21 EISA irq 14 14
+ *22 EISA irq 15 15
+ *23 IIC -
+ *24 VGA (builtin) -
+ *25
+ *26
+ *27
+ *28 NCR810 (builtin) 28
+ *29
+ *30
+ *31
+ *32 PCI 0 slot 4 A primary bus 32
+ *33 PCI 0 slot 4 B primary bus 33
+ *34 PCI 0 slot 4 C primary bus 34
+ *35 PCI 0 slot 4 D primary bus 35
+ *36 PCI 0 slot 5 A primary bus 36
+ *37 PCI 0 slot 5 B primary bus 37
+ *38 PCI 0 slot 5 C primary bus 38
+ *39 PCI 0 slot 5 D primary bus 39
+ *40 PCI 0 slot 6 A primary bus 40
+ *41 PCI 0 slot 6 B primary bus 41
+ *42 PCI 0 slot 6 C primary bus 42
+ *43 PCI 0 slot 6 D primary bus 43
+ *44 PCI 0 slot 7 A primary bus 44
+ *45 PCI 0 slot 7 B primary bus 45
+ *46 PCI 0 slot 7 C primary bus 46
+ *47 PCI 0 slot 7 D primary bus 47
+ *48 PCI 0 slot 0 A secondary bus 48
+ *49 PCI 0 slot 0 B secondary bus 49
+ *50 PCI 0 slot 0 C secondary bus 50
+ *51 PCI 0 slot 0 D secondary bus 51
+ *52 PCI 0 slot 1 A secondary bus 52
+ *53 PCI 0 slot 1 B secondary bus 53
+ *54 PCI 0 slot 1 C secondary bus 54
+ *55 PCI 0 slot 1 D secondary bus 55
+ *56 PCI 0 slot 2 A secondary bus 56
+ *57 PCI 0 slot 2 B secondary bus 57
+ *58 PCI 0 slot 2 C secondary bus 58
+ *59 PCI 0 slot 2 D secondary bus 59
+ *60 PCI 0 slot 3 A secondary bus 60
+ *61 PCI 0 slot 3 B secondary bus 61
+ *62 PCI 0 slot 3 C secondary bus 62
+ *63 PCI 0 slot 3 D secondary bus 63
+ */
+
+static void
+lynx_update_irq_hw(unsigned long bit, unsigned long mask)
+{
+ /*
+ * Write the AIR register on the T3/T4 with the
+ * address of the IC mask register (offset 0x40)
+ */
+ *(vulp)T2_AIR = 0x40;
+ mb();
+ *(vulp)T2_AIR; /* re-read to force write */
+ mb();
+ *(vulp)T2_DIR = mask;
+ mb();
+ mb();
+}
+
+static void
+lynx_ack_irq_hw(unsigned long bit)
+{
+ *(vulp)T2_VAR = (u_long) bit;
+ mb();
+ mb();
+}
+
+static irq_swizzle_t lynx_irq_swizzle = {
+ { /* irq_to_mask */
+ -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
+ -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo */
+ -1, -1, -1, -1, 28, -1, -1, -1, /* pseudo */
+ 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
+ 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
+ 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
+ 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
+ },
+ { /* mask_to_irq */
+ -1, -1, -1, 12, -1, -1, 1, 6, /* mask 0-7 */
+ 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
+ 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
+ -1, -1, -1, -1, 28, -1, -1, -1, /* mask 24-31 */
+ 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
+ 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
+ 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
+ 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
+ },
+ -1,
+ lynx_update_irq_hw,
+ lynx_ack_irq_hw
+};
+
+static void __init
+lynx_init_irq(void)
+{
+ sable_lynx_irq_swizzle = &lynx_irq_swizzle;
+ sable_lynx_init_irq(64);
+}
+
+/*
+ * PCI Fixup configuration for ALPHA LYNX (2100A)
+ *
+ * The device to slot mapping looks like:
+ *
+ * Slot Device
+ * 0 none
+ * 1 none
+ * 2 PCI-EISA bridge
+ * 3 PCI-PCI bridge
+ * 4 NCR 810 (Demi-Lynx only)
+ * 5 none
+ * 6 PCI on board slot 4
+ * 7 PCI on board slot 5
+ * 8 PCI on board slot 6
+ * 9 PCI on board slot 7
+ *
+ * And behind the PPB we have:
+ *
+ * 11 PCI on board slot 0
+ * 12 PCI on board slot 1
+ * 13 PCI on board slot 2
+ * 14 PCI on board slot 3
+ */
+/*
+ * NOTE: the IRQ assignments below are arbitrary, but need to be consistent
+ * with the values in the irq swizzling tables above.
+ */
+
+static int __init
+lynx_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[19][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
+ { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
+ { 28, 28, 28, 28, 28}, /* IdSel 15, NCR demi */
+ { -1, -1, -1, -1, -1}, /* IdSel 16, none */
+ { 32, 32, 33, 34, 35}, /* IdSel 17, slot 4 */
+ { 36, 36, 37, 38, 39}, /* IdSel 18, slot 5 */
+ { 40, 40, 41, 42, 43}, /* IdSel 19, slot 6 */
+ { 44, 44, 45, 46, 47}, /* IdSel 20, slot 7 */
+ { -1, -1, -1, -1, -1}, /* IdSel 21, none */
+ /* The following are actually behind the PPB. */
+ { -1, -1, -1, -1, -1}, /* IdSel 16 none */
+ { 28, 28, 28, 28, 28}, /* IdSel 17 NCR lynx */
+ { -1, -1, -1, -1, -1}, /* IdSel 18 none */
+ { -1, -1, -1, -1, -1}, /* IdSel 19 none */
+ { -1, -1, -1, -1, -1}, /* IdSel 20 none */
+ { -1, -1, -1, -1, -1}, /* IdSel 21 none */
+ { 48, 48, 49, 50, 51}, /* IdSel 22 slot 0 */
+ { 52, 52, 53, 54, 55}, /* IdSel 23 slot 1 */
+ { 56, 56, 57, 58, 59}, /* IdSel 24 slot 2 */
+ { 60, 60, 61, 62, 63} /* IdSel 25 slot 3 */
+ };
+ const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static u8 __init
+lynx_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ int slot, pin = *pinp;
+
+ if (dev->bus->number == 0) {
+ slot = PCI_SLOT(dev->devfn);
+ }
+ /* Check for the built-in bridge */
+ else if (PCI_SLOT(dev->bus->self->devfn) == 3) {
+ slot = PCI_SLOT(dev->devfn) + 11;
+ }
+ else
+ {
+ /* Must be a card-based bridge. */
+ do {
+ if (PCI_SLOT(dev->bus->self->devfn) == 3) {
+ slot = PCI_SLOT(dev->devfn) + 11;
+ break;
+ }
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
+
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ /* Slot of the next bridge. */
+ slot = PCI_SLOT(dev->devfn);
+ } while (dev->bus->self);
+ }
+ *pinp = pin;
+ return slot;
+}
+
+#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */
+
+/***********************************************************************/
+/* GENERIC irq routines */
+
+static inline void
+sable_lynx_enable_irq(unsigned int irq)
+{
+ unsigned long bit, mask;
+
+ bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+ spin_lock(&sable_lynx_irq_lock);
+ mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
+ sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
+ spin_unlock(&sable_lynx_irq_lock);
+#if 0
+ printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
+ __FUNCTION__, mask, bit, irq);
+#endif
+}
+
+static void
+sable_lynx_disable_irq(unsigned int irq)
+{
+ unsigned long bit, mask;
+
+ bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+ spin_lock(&sable_lynx_irq_lock);
+ mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
+ sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
+ spin_unlock(&sable_lynx_irq_lock);
+#if 0
+ printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
+ __FUNCTION__, mask, bit, irq);
+#endif
+}
+
+static unsigned int
+sable_lynx_startup_irq(unsigned int irq)
+{
+ sable_lynx_enable_irq(irq);
+ return 0;
+}
+
+static void
+sable_lynx_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ sable_lynx_enable_irq(irq);
+}
+
+static void
+sable_lynx_mask_and_ack_irq(unsigned int irq)
+{
+ unsigned long bit, mask;
+
+ bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+ spin_lock(&sable_lynx_irq_lock);
+ mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
+ sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
+ sable_lynx_irq_swizzle->ack_irq_hw(bit);
+ spin_unlock(&sable_lynx_irq_lock);
+}
+
+static struct hw_interrupt_type sable_lynx_irq_type = {
+ .typename = "SABLE/LYNX",
+ .startup = sable_lynx_startup_irq,
+ .shutdown = sable_lynx_disable_irq,
+ .enable = sable_lynx_enable_irq,
+ .disable = sable_lynx_disable_irq,
+ .ack = sable_lynx_mask_and_ack_irq,
+ .end = sable_lynx_end_irq,
+};
+
+static void
+sable_lynx_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ /* Note that the vector reported by the SRM PALcode corresponds
+ to the interrupt mask bits, but we have to manage via the
+ so-called legacy IRQs for many common devices. */
+
+ int bit, irq;
+
+ bit = (vector - 0x800) >> 4;
+ irq = sable_lynx_irq_swizzle->mask_to_irq[bit];
+#if 0
+ printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n",
+ __FUNCTION__, vector, bit, irq);
+#endif
+ handle_irq(irq, regs);
+}
+
+static void __init
+sable_lynx_init_irq(int nr_irqs)
+{
+ long i;
+
+ for (i = 0; i < nr_irqs; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &sable_lynx_irq_type;
+ }
+
+ common_init_isa_dma();
+}
+
+static void __init
+sable_lynx_init_pci(void)
+{
+ common_init_pci();
+}
+
+/*****************************************************************/
+/*
+ * The System Vectors
+ *
+ * In order that T2_HAE_ADDRESS should be a constant, we play
+ * these games with GAMMA_BIAS.
+ */
+
+#if defined(CONFIG_ALPHA_GENERIC) || \
+ (defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA))
+#undef GAMMA_BIAS
+#define GAMMA_BIAS 0
+struct alpha_machine_vector sable_mv __initmv = {
+ .vector_name = "Sable",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_T2_IO,
+ .machine_check = t2_machine_check,
+ .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = T2_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 40,
+ .device_interrupt = sable_lynx_srm_device_interrupt,
+
+ .init_arch = t2_init_arch,
+ .init_irq = sable_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = sable_lynx_init_pci,
+ .kill_arch = t2_kill_arch,
+ .pci_map_irq = sable_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .t2 = {
+ .gamma_bias = 0
+ } }
+};
+ALIAS_MV(sable)
+#endif /* GENERIC || (SABLE && !GAMMA) */
+
+#if defined(CONFIG_ALPHA_GENERIC) || \
+ (defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA))
+#undef GAMMA_BIAS
+#define GAMMA_BIAS _GAMMA_BIAS
+struct alpha_machine_vector sable_gamma_mv __initmv = {
+ .vector_name = "Sable-Gamma",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_T2_IO,
+ .machine_check = t2_machine_check,
+ .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = T2_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 40,
+ .device_interrupt = sable_lynx_srm_device_interrupt,
+
+ .init_arch = t2_init_arch,
+ .init_irq = sable_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = sable_lynx_init_pci,
+ .kill_arch = t2_kill_arch,
+ .pci_map_irq = sable_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .t2 = {
+ .gamma_bias = _GAMMA_BIAS
+ } }
+};
+ALIAS_MV(sable_gamma)
+#endif /* GENERIC || (SABLE && GAMMA) */
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
+#undef GAMMA_BIAS
+#define GAMMA_BIAS _GAMMA_BIAS
+struct alpha_machine_vector lynx_mv __initmv = {
+ .vector_name = "Lynx",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_T2_IO,
+ .machine_check = t2_machine_check,
+ .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = EISA_DEFAULT_IO_BASE,
+ .min_mem_address = T2_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 64,
+ .device_interrupt = sable_lynx_srm_device_interrupt,
+
+ .init_arch = t2_init_arch,
+ .init_irq = lynx_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = sable_lynx_init_pci,
+ .kill_arch = t2_kill_arch,
+ .pci_map_irq = lynx_map_irq,
+ .pci_swizzle = lynx_swizzle,
+
+ .sys = { .t2 = {
+ .gamma_bias = _GAMMA_BIAS
+ } }
+};
+ALIAS_MV(lynx)
+#endif /* GENERIC || LYNX */
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
new file mode 100644
index 000000000000..47df48a6ddb7
--- /dev/null
+++ b/arch/alpha/kernel/sys_sio.c
@@ -0,0 +1,438 @@
+/*
+ * linux/arch/alpha/kernel/sys_sio.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code for all boards that route the PCI interrupts through the SIO
+ * PCI/ISA bridge. This includes Noname (AXPpci33), Multia (UDB),
+ * Kenetics's Platform 2000, Avanti (AlphaStation), XL, and AlphaBook1.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+
+#include <asm/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_apecs.h>
+#include <asm/core_lca.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+#if defined(ALPHA_RESTORE_SRM_SETUP)
+/* Save LCA configuration data as the console had it set up. */
+struct
+{
+ unsigned int orig_route_tab; /* for SAVE/RESTORE */
+} saved_config __attribute((common));
+#endif
+
+
+static void __init
+sio_init_irq(void)
+{
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = srm_device_interrupt;
+
+ init_i8259a_irqs();
+ common_init_isa_dma();
+}
+
+static inline void __init
+alphabook1_init_arch(void)
+{
+ /* The AlphaBook1 has LCD video fixed at 800x600,
+ 37 rows and 100 cols. */
+ screen_info.orig_y = 37;
+ screen_info.orig_video_cols = 100;
+ screen_info.orig_video_lines = 37;
+
+ lca_init_arch();
+}
+
+
+/*
+ * sio_route_tab selects irq routing in PCI/ISA bridge so that:
+ * PIRQ0 -> irq 15
+ * PIRQ1 -> irq 9
+ * PIRQ2 -> irq 10
+ * PIRQ3 -> irq 11
+ *
+ * This probably ought to be configurable via MILO. For
+ * example, sound boards seem to like using IRQ 9.
+ *
+ * This is NOT how we should do it. PIRQ0-X should have
+ * their own IRQs, the way Intel uses the IO-APIC IRQs.
+ */
+
+static void __init
+sio_pci_route(void)
+{
+#if defined(ALPHA_RESTORE_SRM_SETUP)
+ /* First, read and save the original setting. */
+ pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
+ &saved_config.orig_route_tab);
+ printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__,
+ saved_config.orig_route_tab, alpha_mv.sys.sio.route_tab);
+#endif
+
+ /* Now override with desired setting. */
+ pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
+ alpha_mv.sys.sio.route_tab);
+}
+
+static unsigned int __init
+sio_collect_irq_levels(void)
+{
+ unsigned int level_bits = 0;
+ struct pci_dev *dev = NULL;
+
+ /* Iterate through the devices, collecting IRQ levels. */
+ while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
+ (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+ continue;
+
+ if (dev->irq)
+ level_bits |= (1 << dev->irq);
+ }
+ return level_bits;
+}
+
+static void __init
+sio_fixup_irq_levels(unsigned int level_bits)
+{
+ unsigned int old_level_bits;
+
+ /*
+ * Now, make all PCI interrupts level sensitive. Notice:
+ * these registers must be accessed byte-wise. inw()/outw()
+ * don't work.
+ *
+ * Make sure to turn off any level bits set for IRQs 9,10,11,15,
+ * so that the only bits getting set are for devices actually found.
+ * Note that we do preserve the remainder of the bits, which we hope
+ * will be set correctly by ARC/SRM.
+ *
+ * Note: we at least preserve any level-set bits on AlphaBook1
+ */
+ old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
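+
+ /* 0x4d0/0x4d1 are the edge/level control registers (ELCR): one bit
+ per ISA IRQ, set for level-triggered; 0x4d0 holds IRQs 0-7 and
+ 0x4d1 IRQs 8-15. The 0x71ff mask below keeps every preset bit
+ except those for IRQs 9, 10, 11 and 15. */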
+
+ level_bits |= (old_level_bits & 0x71ff);
+
+ outb((level_bits >> 0) & 0xff, 0x4d0);
+ outb((level_bits >> 8) & 0xff, 0x4d1);
+}
+
+static inline int __init
+noname_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ /*
+ * The Noname board has 5 PCI slots with each of the 4
+ * interrupt pins routed to different pins on the PCI/ISA
+ * bridge (PIRQ0-PIRQ3). The table below is based on
+ * information available at:
+ *
+ * http://ftp.digital.com/pub/DEC/axppci/ref_interrupts.txt
+ *
+ * I have no information on the Avanti interrupt routing, but
+ * the routing seems to be identical to the Noname except
+ * that the Avanti has an additional slot whose routing I'm
+ * unsure of.
+ *
+ * pirq_tab[0] is a fake entry to deal with old PCI boards
+ * that have the interrupt pin number hardwired to 0 (meaning
+ * that they use the default INTA line, if they are interrupt
+ * driven at all).
+ */
+ static char irq_tab[][5] __initdata = {
+ /*INT A B C D */
+ { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
+ {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
+ { 2, 2, -1, -1, -1}, /* idsel 8 (Hack: slot closest ISA) */
+ {-1, -1, -1, -1, -1}, /* idsel 9 (unused) */
+ {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
+ { 0, 0, 2, 1, 0}, /* idsel 11 KN25_PCI_SLOT0 */
+ { 1, 1, 0, 2, 1}, /* idsel 12 KN25_PCI_SLOT1 */
+ { 2, 2, 1, 0, 2}, /* idsel 13 KN25_PCI_SLOT2 */
+ { 0, 0, 0, 0, 0}, /* idsel 14 AS255 TULIP */
+ };
+ const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
+ int irq = COMMON_TABLE_LOOKUP, tmp;
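+
+ /* The table yields a PIRQ number (0-3) or -1; __kernel_extbl pulls
+ byte <irq> out of route_tab, which is the ISA IRQ that
+ sio_pci_route() programs for that PIRQ line. */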
+ tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
+ return irq >= 0 ? tmp : -1;
+}
+
+static inline int __init
+p2k_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[][5] __initdata = {
+ /*INT A B C D */
+ { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
+ {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
+ { 1, 1, 2, 3, 0}, /* idsel 8 (slot A) */
+ { 2, 2, 3, 0, 1}, /* idsel 9 (slot B) */
+ {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
+ {-1, -1, -1, -1, -1}, /* idsel 11 (unused) */
+ { 3, 3, -1, -1, -1}, /* idsel 12 (CMD0646) */
+ };
+ const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
+ int irq = COMMON_TABLE_LOOKUP, tmp;
+ tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
+ return irq >= 0 ? tmp : -1;
+}
+
+static inline void __init
+noname_init_pci(void)
+{
+ common_init_pci();
+ sio_pci_route();
+ sio_fixup_irq_levels(sio_collect_irq_levels());
+ ns87312_enable_ide(0x26e);
+}
+
+static inline void __init
+alphabook1_init_pci(void)
+{
+ struct pci_dev *dev;
+ unsigned char orig, config;
+
+ common_init_pci();
+ sio_pci_route();
+
+ /*
+ * On the AlphaBook1, the PCMCIA chip (Cirrus 6729)
+ * is sensitive to PCI bus bursts, so we must DISABLE
+ * burst mode for the NCR 8xx SCSI... :-(
+ *
+ * Note that the NCR810 SCSI driver must preserve the
+ * setting of the bit in order for this to work. At the
+ * moment (2.0.29), ncr53c8xx.c does NOT do this, but
+ * 53c7,8xx.c DOES.
+ */
+
+ dev = NULL;
+ while ((dev = pci_find_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) {
+ if (dev->device == PCI_DEVICE_ID_NCR_53C810
+ || dev->device == PCI_DEVICE_ID_NCR_53C815
+ || dev->device == PCI_DEVICE_ID_NCR_53C820
+ || dev->device == PCI_DEVICE_ID_NCR_53C825) {
+ unsigned long io_port;
+ unsigned char ctest4;
+
+ io_port = dev->resource[0].start;
+ ctest4 = inb(io_port+0x21);
+ if (!(ctest4 & 0x80)) {
+ printk("AlphaBook1 NCR init: setting"
+ " burst disable\n");
+ outb(ctest4 | 0x80, io_port+0x21);
+ }
+ }
+ }
+
+ /* Do not set *ANY* level triggers for AlphaBook1. */
+ sio_fixup_irq_levels(0);
+
+ /* Make sure that register PR1 indicates 1Mb mem */
+ outb(0x0f, 0x3ce); orig = inb(0x3cf); /* read PR5 */
+ outb(0x0f, 0x3ce); outb(0x05, 0x3cf); /* unlock PR0-4 */
+ outb(0x0b, 0x3ce); config = inb(0x3cf); /* read PR1 */
+ if ((config & 0xc0) != 0xc0) {
+ printk("AlphaBook1 VGA init: setting 1Mb memory\n");
+ config |= 0xc0;
+ outb(0x0b, 0x3ce); outb(config, 0x3cf); /* write PR1 */
+ }
+ outb(0x0f, 0x3ce); outb(orig, 0x3cf); /* (re)lock PR0-4 */
+}
+
+void
+sio_kill_arch(int mode)
+{
+#if defined(ALPHA_RESTORE_SRM_SETUP)
+ /* Since we cannot read the PCI DMA Window CSRs, we
+ * cannot restore them here.
+ *
+ * However, we CAN read the PIRQ route register, so restore it
+ * now...
+ */
+ pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
+ saved_config.orig_route_tab);
+#endif
+}
+
+
+/*
+ * The System Vectors
+ */
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1)
+struct alpha_machine_vector alphabook1_mv __initmv = {
+ .vector_name = "AlphaBook1",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_LCA_IO,
+ .machine_check = lca_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = alphabook1_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = alphabook1_init_pci,
+ .kill_arch = sio_kill_arch,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ /* NCR810 SCSI is 14, PCMCIA controller is 15. */
+ .route_tab = 0x0e0f0a0a,
+ }}
+};
+ALIAS_MV(alphabook1)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI)
+struct alpha_machine_vector avanti_mv __initmv = {
+ .vector_name = "Avanti",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_APECS_IO,
+ .machine_check = apecs_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .kill_arch = sio_kill_arch,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ .route_tab = 0x0b0a0e0f,
+ }}
+};
+ALIAS_MV(avanti)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME)
+struct alpha_machine_vector noname_mv __initmv = {
+ .vector_name = "Noname",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_LCA_IO,
+ .machine_check = lca_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = srm_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .kill_arch = sio_kill_arch,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ /* For UDB, the only available PCI slot must not map to IRQ 9,
+ since that's the builtin MSS sound chip. That PCI slot
+ will map to PIRQ1 (for INTA at least), so we give it IRQ 15
+ instead.
+
+ Unfortunately we have to do this for NONAME as well, since
+ they are co-indicated when the platform type "Noname" is
+ selected... :-( */
+
+ .route_tab = 0x0b0a0f0d,
+ }}
+};
+ALIAS_MV(noname)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K)
+struct alpha_machine_vector p2k_mv __initmv = {
+ .vector_name = "Platform2000",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_LCA_IO,
+ .machine_check = lca_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = srm_device_interrupt,
+
+ .init_arch = lca_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .kill_arch = sio_kill_arch,
+ .pci_map_irq = p2k_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ .route_tab = 0x0b0a090f,
+ }}
+};
+ALIAS_MV(p2k)
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL)
+struct alpha_machine_vector xl_mv __initmv = {
+ .vector_name = "XL",
+ DO_EV4_MMU,
+ DO_DEFAULT_RTC,
+ DO_APECS_IO,
+ .machine_check = apecs_machine_check,
+ .max_isa_dma_address = ALPHA_XL_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = XL_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 16,
+ .device_interrupt = isa_device_interrupt,
+
+ .init_arch = apecs_init_arch,
+ .init_irq = sio_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = noname_init_pci,
+ .kill_arch = sio_kill_arch,
+ .pci_map_irq = noname_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .sys = { .sio = {
+ .route_tab = 0x0b0a090f,
+ }}
+};
+ALIAS_MV(xl)
+#endif
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
new file mode 100644
index 000000000000..94ad68b7c0ae
--- /dev/null
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -0,0 +1,178 @@
+/*
+ * linux/arch/alpha/kernel/sys_sx164.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999, 2000 Richard Henderson
+ *
+ * Code supporting the SX164 (PCA56+PYXIS).
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_cia.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+static void __init
+sx164_init_irq(void)
+{
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
+ outb(0, DMA2_MASK_REG);
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = srm_device_interrupt;
+
+ init_i8259a_irqs();
+
+ /* Not interested in the bogus interrupts (0,3,4,5,40-47),
+ NMI (1), or HALT (2). */
+ if (alpha_using_srm)
+ init_srm_irqs(40, 0x3f0000);
+ else
+ init_pyxis_irqs(0xff00003f0000UL);
+
+ setup_irq(16+6, &timer_cascade_irqaction);
+}
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary @ PYXIS_INT_REQ:
+ * Bit Meaning
+ * 0 RSVD
+ * 1 NMI
+ * 2 Halt/Reset switch
+ * 3 MBZ
+ * 4 RAZ
+ * 5 RAZ
+ * 6 Interval timer (RTC)
+ * 7 PCI-ISA Bridge
+ * 8 Interrupt Line A from slot 3
+ * 9 Interrupt Line A from slot 2
+ *10 Interrupt Line A from slot 1
+ *11 Interrupt Line A from slot 0
+ *12 Interrupt Line B from slot 3
+ *13 Interrupt Line B from slot 2
+ *14 Interrupt Line B from slot 1
+ *15 Interrupt Line B from slot 0
+ *16 Interrupt Line C from slot 3
+ *17 Interrupt Line C from slot 2
+ *18 Interrupt Line C from slot 1
+ *19 Interrupt Line C from slot 0
+ *20 Interrupt Line D from slot 3
+ *21 Interrupt Line D from slot 2
+ *22 Interrupt Line D from slot 1
+ *23 Interrupt Line D from slot 0
+ *
+ * IdSel
+ * 5 32 bit PCI option slot 2
+ * 6 64 bit PCI option slot 0
+ * 7 64 bit PCI option slot 1
+ * 8 Cypress I/O
+ * 9 32 bit PCI option slot 3
+ */
+
+static int __init
+sx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[5][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
+ { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
+ { 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */
+ { -1, -1, -1, -1, -1}, /* IdSel 8 SIO */
+ { 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */
+ };
+ const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
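+
+/*
+ * COMMON_TABLE_LOOKUP (from pci_impl.h) expands, roughly, to a
+ * bounds-checked lookup of the table above -- a paraphrase, not the
+ * literal macro text:
+ *
+ *	(slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot)
+ *		? irq_tab[slot - min_idsel][pin] : -1
+ *
+ * e.g. IdSel 6 (64-bit slot 0) with pin 2 (INTB) yields 16+15 here.
+ */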
+
+static void __init
+sx164_init_pci(void)
+{
+ cia_init_pci();
+ SMC669_Init(0);
+}
+
+static void __init
+sx164_init_arch(void)
+{
+ /*
+ * OSF palcode v1.23 forgets to enable PCA56 Motion Video
+ * Instructions. Let's enable it.
+ * We have to check palcode revision because CSERVE interface
+ * is subject to change without notice. For example, it
+ * has been changed completely since v1.16 (found in MILO
+ * distribution). -ink
+ */
+ struct percpu_struct *cpu = (struct percpu_struct*)
+ ((char*)hwrpb + hwrpb->processor_offset);
+
+ if (amask(AMASK_MAX) != 0
+ && alpha_using_srm
+ && (cpu->pal_revision & 0xffff) == 0x117) {
+ __asm__ __volatile__(
+ "lda $16,8($31)\n"
+ "call_pal 9\n" /* Allow PALRES insns in kernel mode */
+ ".long 0x64000118\n\n" /* hw_mfpr $0,icsr */
+ "ldah $16,(1<<(19-16))($31)\n"
+ "or $0,$16,$0\n" /* set MVE bit */
+ ".long 0x74000118\n" /* hw_mtpr $0,icsr */
+ "lda $16,9($31)\n"
+ "call_pal 9" /* Disable PALRES insns */
+ : : : "$0", "$16");
+ printk("PCA56 MVI set enabled\n");
+ }
+
+ pyxis_init_arch();
+}
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector sx164_mv __initmv = {
+ .vector_name = "SX164",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_PYXIS_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = PYXIS_DAC_OFFSET,
+
+ .nr_irqs = 48,
+ .device_interrupt = pyxis_device_interrupt,
+
+ .init_arch = sx164_init_arch,
+ .init_irq = sx164_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = sx164_init_pci,
+ .kill_arch = cia_kill_arch,
+ .pci_map_irq = sx164_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(sx164)
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
new file mode 100644
index 000000000000..7955bdfc2db0
--- /dev/null
+++ b/arch/alpha/kernel/sys_takara.c
@@ -0,0 +1,296 @@
+/*
+ * linux/arch/alpha/kernel/sys_takara.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ *
+ * Code supporting the TAKARA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_cia.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/* Note mask bit is true for DISABLED irqs. */
+static unsigned long cached_irq_mask[2] = { -1, -1 };
+
+static inline void
+takara_update_irq_hw(unsigned long irq, unsigned long mask)
+{
+ int regaddr;
+
+ mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
+ regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
+ outl(mask & 0xffff0000UL, regaddr);
+}
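+
+/*
+ * Worked example of the register math above, for irq 35:
+ * (35-16) & 0x30 = 16, so the cached mask shifts right by 16 and bit
+ * 35 lands in the upper halfword that the outl() writes; and
+ * ((35-16) >> 2) & 0x0c = 4, so the write goes to port 0x510 + 4 = 0x514.
+ */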
+
+static inline void
+takara_enable_irq(unsigned int irq)
+{
+ unsigned long mask;
+ mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
+ takara_update_irq_hw(irq, mask);
+}
+
+static void
+takara_disable_irq(unsigned int irq)
+{
+ unsigned long mask;
+ mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
+ takara_update_irq_hw(irq, mask);
+}
+
+static unsigned int
+takara_startup_irq(unsigned int irq)
+{
+ takara_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+takara_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ takara_enable_irq(irq);
+}
+
+static struct hw_interrupt_type takara_irq_type = {
+ .typename = "TAKARA",
+ .startup = takara_startup_irq,
+ .shutdown = takara_disable_irq,
+ .enable = takara_enable_irq,
+ .disable = takara_disable_irq,
+ .ack = takara_disable_irq,
+ .end = takara_end_irq,
+};
+
+static void
+takara_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned intstatus;
+
+ /*
+ * The PALcode will have passed us vectors 0x800 or 0x810,
+ * which are fairly arbitrary values and serve only to tell
+ * us whether an interrupt has come in on IRQ0 or IRQ1. If
+ * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
+ * probably ISA, but PCI interrupts can come through IRQ0
+ * as well if the interrupt controller isn't in accelerated
+ * mode.
+ *
+ * OTOH, the accelerator thing doesn't seem to be working
+ * overly well, so what we'll do instead is try directly
+ * examining the Master Interrupt Register to see if it's a
+ * PCI interrupt, and if _not_ then we'll pass it on to the
+ * ISA handler.
+ */
+
+ intstatus = inw(0x500) & 15;
+ if (intstatus) {
+ /*
+ * This is a PCI interrupt. Check each bit and
+	 * dispatch an interrupt if it's set.
+ */
+
+ if (intstatus & 8) handle_irq(16+3, regs);
+ if (intstatus & 4) handle_irq(16+2, regs);
+ if (intstatus & 2) handle_irq(16+1, regs);
+ if (intstatus & 1) handle_irq(16+0, regs);
+ } else {
+ isa_device_interrupt (vector, regs);
+ }
+}
+
+static void
+takara_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
+}
+
+static void __init
+takara_init_irq(void)
+{
+ long i;
+
+ init_i8259a_irqs();
+
+ if (alpha_using_srm) {
+ alpha_mv.device_interrupt = takara_srm_device_interrupt;
+ } else {
+ unsigned int ctlreg = inl(0x500);
+
+ /* Return to non-accelerated mode. */
+ ctlreg &= ~0x8000;
+ outl(ctlreg, 0x500);
+
+ /* Enable the PCI interrupt register. */
+ ctlreg = 0x05107c00;
+ outl(ctlreg, 0x500);
+ }
+
+ for (i = 16; i < 128; i += 16)
+ takara_update_irq_hw(i, -1);
+
+ for (i = 16; i < 128; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &takara_irq_type;
+ }
+
+ common_init_isa_dma();
+}
+
+
+/*
+ * The Takara has PCI devices 1, 2, and 3 configured to slots 20,
+ * 19, and 18 respectively, in the default configuration. They can
+ * also be jumpered to slots 8, 7, and 6 respectively, which is fun
+ * because the SIO ISA bridge can also be slot 7. However, the SIO
+ * doesn't explicitly generate PCI-type interrupts, so we can
+ * assign it whatever the hell IRQ we like and it doesn't matter.
+ */
+
+static int __init
+takara_map_irq_srm(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[15][5] __initdata = {
+ { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
+ { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
+ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
+ { -1, -1, -1, -1, -1}, /* slot 9 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 10 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 11 == nothing */
+ /* These are behind the bridges. */
+ { 12, 12, 13, 14, 15}, /* slot 12 == nothing */
+		{ 8, 8, 9, 10, 11}, /* slot 13 == nothing */
+ { 4, 4, 5, 6, 7}, /* slot 14 == nothing */
+ { 0, 0, 1, 2, 3}, /* slot 15 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 16 == nothing */
+		{64+ 0, 64+0, 64+1, 64+2, 64+3}, /* slot 17 == device 4 */
+		{48+ 0, 48+0, 48+1, 48+2, 48+3}, /* slot 18 == device 3 */
+		{32+ 0, 32+0, 32+1, 32+2, 32+3}, /* slot 19 == device 2 */
+		{16+ 0, 16+0, 16+1, 16+2, 16+3}, /* slot 20 == device 1 */
+ };
+ const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
+ int irq = COMMON_TABLE_LOOKUP;
+ if (irq >= 0 && irq < 16) {
+ /* Guess that we are behind a bridge. */
+ unsigned int busslot = PCI_SLOT(dev->bus->self->devfn);
+ irq += irq_tab[busslot-min_idsel][0];
+ }
+ return irq;
+}
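+
+/*
+ * Example of the bridge fixup above: a device behind the bridge at
+ * slot 19 whose table lookup returned 5 (the slot 14 row, INTB) ends
+ * up with irq 5 + irq_tab[19-6][0] = 5 + 32 = 37.
+ */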
+
+static int __init
+takara_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[15][5] __initdata = {
+ { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
+ { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
+ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
+ { -1, -1, -1, -1, -1}, /* slot 9 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 10 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 11 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 12 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 13 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 14 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 15 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 16 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 17 == nothing */
+ { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 18 == device 3 */
+ { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 19 == device 2 */
+ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 20 == device 1 */
+ };
+ const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
+ return COMMON_TABLE_LOOKUP;
+}
+
+static u8 __init
+takara_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ int slot = PCI_SLOT(dev->devfn);
+ int pin = *pinp;
+ unsigned int ctlreg = inl(0x500);
+ unsigned int busslot;
+
+ if (!dev->bus->self)
+ return slot;
+
+ busslot = PCI_SLOT(dev->bus->self->devfn);
+ /* Check for built-in bridges. */
+ if (dev->bus->number != 0
+ && busslot > 16
+ && ((1<<(36-busslot)) & ctlreg)) {
+ if (pin == 1)
+ pin += (20 - busslot);
+ else {
+ printk(KERN_WARNING "takara_swizzle: can only "
+ "handle cards with INTA IRQ pin.\n");
+ }
+ } else {
+ /* Must be a card-based bridge. */
+ printk(KERN_WARNING "takara_swizzle: cannot handle "
+ "card-bridge behind builtin bridge yet.\n");
+ }
+
+ *pinp = pin;
+ return slot;
+}
+
+static void __init
+takara_init_pci(void)
+{
+ if (alpha_using_srm)
+ alpha_mv.pci_map_irq = takara_map_irq_srm;
+
+ cia_init_pci();
+ ns87312_enable_ide(0x26e);
+}
+
+
+/*
+ * The System Vector
+ */
+
+struct alpha_machine_vector takara_mv __initmv = {
+ .vector_name = "Takara",
+ DO_EV5_MMU,
+ DO_DEFAULT_RTC,
+ DO_CIA_IO,
+ .machine_check = cia_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = CIA_DEFAULT_MEM_BASE,
+
+ .nr_irqs = 128,
+ .device_interrupt = takara_device_interrupt,
+
+ .init_arch = cia_init_arch,
+ .init_irq = takara_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = takara_init_pci,
+ .kill_arch = cia_kill_arch,
+ .pci_map_irq = takara_map_irq,
+ .pci_swizzle = takara_swizzle,
+};
+ALIAS_MV(takara)
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
new file mode 100644
index 000000000000..5f84417eeb7b
--- /dev/null
+++ b/arch/alpha/kernel/sys_titan.c
@@ -0,0 +1,420 @@
+/*
+ * linux/arch/alpha/kernel/sys_titan.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996, 1999 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ * Copyright (C) 1999, 2000 Jeff Wiedemeier
+ *
+ * Code supporting TITAN systems (EV6+TITAN), currently:
+ * Privateer
+ * Falcon
+ * Granite
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_titan.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+#include "err_impl.h"
+
+
+/*
+ * Titan generic
+ */
+
+/*
+ * Titan supports up to 4 CPUs
+ */
+static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };
+
+/*
+ * Mask is set (1) if enabled
+ */
+static unsigned long titan_cached_irq_mask;
+
+/*
+ * Need SMP-safe access to interrupt CSRs
+ */
+DEFINE_SPINLOCK(titan_irq_lock);
+
+static void
+titan_update_irq_hw(unsigned long mask)
+{
+ register titan_cchip *cchip = TITAN_cchip;
+ unsigned long isa_enable = 1UL << 55;
+ register int bcpu = boot_cpuid;
+
+#ifdef CONFIG_SMP
+ cpumask_t cpm = cpu_present_mask;
+ volatile unsigned long *dim0, *dim1, *dim2, *dim3;
+ unsigned long mask0, mask1, mask2, mask3, dummy;
+
+ mask &= ~isa_enable;
+ mask0 = mask & titan_cpu_irq_affinity[0];
+ mask1 = mask & titan_cpu_irq_affinity[1];
+ mask2 = mask & titan_cpu_irq_affinity[2];
+ mask3 = mask & titan_cpu_irq_affinity[3];
+
+ if (bcpu == 0) mask0 |= isa_enable;
+ else if (bcpu == 1) mask1 |= isa_enable;
+ else if (bcpu == 2) mask2 |= isa_enable;
+ else mask3 |= isa_enable;
+
+ dim0 = &cchip->dim0.csr;
+ dim1 = &cchip->dim1.csr;
+ dim2 = &cchip->dim2.csr;
+ dim3 = &cchip->dim3.csr;
+ if (!cpu_isset(0, cpm)) dim0 = &dummy;
+ if (!cpu_isset(1, cpm)) dim1 = &dummy;
+ if (!cpu_isset(2, cpm)) dim2 = &dummy;
+ if (!cpu_isset(3, cpm)) dim3 = &dummy;
+
+ *dim0 = mask0;
+ *dim1 = mask1;
+ *dim2 = mask2;
+ *dim3 = mask3;
+ mb();
+ *dim0;
+ *dim1;
+ *dim2;
+ *dim3;
+#else
+ volatile unsigned long *dimB;
+ dimB = &cchip->dim0.csr;
+ if (bcpu == 1) dimB = &cchip->dim1.csr;
+ else if (bcpu == 2) dimB = &cchip->dim2.csr;
+ else if (bcpu == 3) dimB = &cchip->dim3.csr;
+
+ *dimB = mask | isa_enable;
+ mb();
+ *dimB;
+#endif
+}
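+
+/*
+ * The &dummy redirection above quietly absorbs the stores aimed at
+ * CPUs that are not present, so all four DIM writes can be issued
+ * unconditionally; the trailing reads force the writes to complete
+ * before the mask update is considered done.
+ */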
+
+static inline void
+titan_enable_irq(unsigned int irq)
+{
+ spin_lock(&titan_irq_lock);
+ titan_cached_irq_mask |= 1UL << (irq - 16);
+ titan_update_irq_hw(titan_cached_irq_mask);
+ spin_unlock(&titan_irq_lock);
+}
+
+static inline void
+titan_disable_irq(unsigned int irq)
+{
+ spin_lock(&titan_irq_lock);
+ titan_cached_irq_mask &= ~(1UL << (irq - 16));
+ titan_update_irq_hw(titan_cached_irq_mask);
+ spin_unlock(&titan_irq_lock);
+}
+
+static unsigned int
+titan_startup_irq(unsigned int irq)
+{
+ titan_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+titan_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ titan_enable_irq(irq);
+}
+
+static void
+titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < 4; cpu++) {
+ if (cpu_isset(cpu, affinity))
+ titan_cpu_irq_affinity[cpu] |= 1UL << irq;
+ else
+ titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
+ }
+
+}
+
+static void
+titan_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+ spin_lock(&titan_irq_lock);
+ titan_cpu_set_irq_affinity(irq - 16, affinity);
+ titan_update_irq_hw(titan_cached_irq_mask);
+ spin_unlock(&titan_irq_lock);
+}
+
+static void
+titan_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ printk("titan_device_interrupt: NOT IMPLEMENTED YET!! \n");
+}
+
+static void
+titan_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
+}
+
+
+static void __init
+init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+{
+ long i;
+ for (i = imin; i <= imax; ++i) {
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = ops;
+ }
+}
+
+static struct hw_interrupt_type titan_irq_type = {
+ .typename = "TITAN",
+ .startup = titan_startup_irq,
+ .shutdown = titan_disable_irq,
+ .enable = titan_enable_irq,
+ .disable = titan_disable_irq,
+ .ack = titan_disable_irq,
+ .end = titan_end_irq,
+ .set_affinity = titan_set_irq_affinity,
+};
+
+static irqreturn_t
+titan_intr_nop(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /*
+ * This is a NOP interrupt handler for the purposes of
+ * event counting -- just return.
+ */
+ return IRQ_HANDLED;
+}
+
+static void __init
+titan_init_irq(void)
+{
+ if (alpha_using_srm && !alpha_mv.device_interrupt)
+ alpha_mv.device_interrupt = titan_srm_device_interrupt;
+ if (!alpha_mv.device_interrupt)
+ alpha_mv.device_interrupt = titan_device_interrupt;
+
+ titan_update_irq_hw(0);
+
+ init_titan_irqs(&titan_irq_type, 16, 63 + 16);
+}
+
+static void __init
+titan_legacy_init_irq(void)
+{
+ /* init the legacy dma controller */
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
+ outb(0, DMA2_MASK_REG);
+
+ /* init the legacy irq controller */
+ init_i8259a_irqs();
+
+ /* init the titan irqs */
+ titan_init_irq();
+}
+
+void
+titan_dispatch_irqs(u64 mask, struct pt_regs *regs)
+{
+ unsigned long vector;
+
+ /*
+	 * Mask down to those interrupts which are enabled on this processor
+ */
+ mask &= titan_cpu_irq_affinity[smp_processor_id()];
+
+ /*
+ * Dispatch all requested interrupts
+ */
+ while (mask) {
+ /* convert to SRM vector... priority is <63> -> <0> */
+ __asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask));
+ vector = 63 - vector;
+ mask &= ~(1UL << vector); /* clear it out */
+ vector = 0x900 + (vector << 4); /* convert to SRM vector */
+
+ /* dispatch it */
+ alpha_mv.device_interrupt(vector, regs);
+ }
+}
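+
+/*
+ * Example: for mask bit 13, ctlz yields vector 13, so the SRM vector
+ * becomes 0x900 + (13 << 4) = 0x9d0; titan_srm_device_interrupt()
+ * then recovers irq = (0x9d0 - 0x800) >> 4 = 29 = 13 + 16, matching
+ * the "irq - 16" bias in titan_enable_irq()/titan_disable_irq().
+ */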
+
+
+/*
+ * Titan Family
+ */
+static void __init
+titan_late_init(void)
+{
+ /*
+ * Enable the system error interrupts. These interrupts are
+ * all reported to the kernel as machine checks, so the handler
+ * is a nop so it can be called to count the individual events.
+ */
+ request_irq(63+16, titan_intr_nop, SA_INTERRUPT,
+ "CChip Error", NULL);
+ request_irq(62+16, titan_intr_nop, SA_INTERRUPT,
+ "PChip 0 H_Error", NULL);
+ request_irq(61+16, titan_intr_nop, SA_INTERRUPT,
+ "PChip 1 H_Error", NULL);
+ request_irq(60+16, titan_intr_nop, SA_INTERRUPT,
+ "PChip 0 C_Error", NULL);
+ request_irq(59+16, titan_intr_nop, SA_INTERRUPT,
+ "PChip 1 C_Error", NULL);
+
+ /*
+ * Register our error handlers.
+ */
+ titan_register_error_handlers();
+
+ /*
+ * Check if the console left us any error logs.
+ */
+ cdl_check_console_data_log();
+
+}
+
+static int __devinit
+titan_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ u8 intline;
+ int irq;
+
+ /* Get the current intline. */
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
+ irq = intline;
+
+ /* Is it explicitly routed through ISA? */
+ if ((irq & 0xF0) == 0xE0)
+ return irq;
+
+ /* Offset by 16 to make room for ISA interrupts 0 - 15. */
+ return irq + 16;
+}
+
+static void __init
+titan_init_pci(void)
+{
+ /*
+ * This isn't really the right place, but there's some init
+ * that needs to be done after everything is basically up.
+ */
+ titan_late_init();
+
+ pci_probe_only = 1;
+ common_init_pci();
+ SMC669_Init(0);
+#ifdef CONFIG_VGA_HOSE
+ locate_and_init_vga(NULL);
+#endif
+}
+
+
+/*
+ * Privateer
+ */
+static void __init
+privateer_init_pci(void)
+{
+ /*
+ * Hook a couple of extra err interrupts that the
+ * common titan code won't.
+ */
+ request_irq(53+16, titan_intr_nop, SA_INTERRUPT,
+ "NMI", NULL);
+ request_irq(50+16, titan_intr_nop, SA_INTERRUPT,
+ "Temperature Warning", NULL);
+
+ /*
+ * Finish with the common version.
+ */
+ return titan_init_pci();
+}
+
+
+/*
+ * The System Vectors.
+ */
+struct alpha_machine_vector titan_mv __initmv = {
+ .vector_name = "TITAN",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TITAN_IO,
+ .machine_check = titan_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TITAN_DAC_OFFSET,
+
+ .nr_irqs = 80, /* 64 + 16 */
+ /* device_interrupt will be filled in by titan_init_irq */
+
+ .agp_info = titan_agp_info,
+
+ .init_arch = titan_init_arch,
+ .init_irq = titan_legacy_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = titan_init_pci,
+
+ .kill_arch = titan_kill_arch,
+ .pci_map_irq = titan_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+ALIAS_MV(titan)
+
+struct alpha_machine_vector privateer_mv __initmv = {
+ .vector_name = "PRIVATEER",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TITAN_IO,
+ .machine_check = privateer_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+ .pci_dac_offset = TITAN_DAC_OFFSET,
+
+ .nr_irqs = 80, /* 64 + 16 */
+ /* device_interrupt will be filled in by titan_init_irq */
+
+ .agp_info = titan_agp_info,
+
+ .init_arch = titan_init_arch,
+ .init_irq = titan_legacy_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = privateer_init_pci,
+
+ .kill_arch = titan_kill_arch,
+ .pci_map_irq = titan_map_irq,
+ .pci_swizzle = common_swizzle,
+};
+/* No alpha_mv alias for privateer since we compile it
+ in unconditionally with titan; setup_arch knows how to cope. */
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
new file mode 100644
index 000000000000..1553f470246e
--- /dev/null
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -0,0 +1,361 @@
+/*
+ * linux/arch/alpha/kernel/sys_wildfire.c
+ *
+ * Wildfire support.
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/core_wildfire.h>
+#include <asm/hwrpb.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+static unsigned long cached_irq_mask[WILDFIRE_NR_IRQS/(sizeof(long)*8)];
+
+DEFINE_SPINLOCK(wildfire_irq_lock);
+
+static int doing_init_irq_hw = 0;
+
+static void
+wildfire_update_irq_hw(unsigned int irq)
+{
+ int qbbno = (irq >> 8) & (WILDFIRE_MAX_QBB - 1);
+ int pcano = (irq >> 6) & (WILDFIRE_PCA_PER_QBB - 1);
+ wildfire_pca *pca;
+ volatile unsigned long * enable0;
+
+ if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
+ if (!doing_init_irq_hw) {
+ printk(KERN_ERR "wildfire_update_irq_hw:"
+ " got irq %d for non-existent PCA %d"
+ " on QBB %d.\n",
+ irq, pcano, qbbno);
+ }
+ return;
+ }
+
+ pca = WILDFIRE_pca(qbbno, pcano);
+ enable0 = (unsigned long *) &pca->pca_int[0].enable; /* ??? */
+
+ *enable0 = cached_irq_mask[qbbno * WILDFIRE_PCA_PER_QBB + pcano];
+ mb();
+ *enable0;
+}
+
+static void __init
+wildfire_init_irq_hw(void)
+{
+#if 0
+ register wildfire_pca * pca = WILDFIRE_pca(0, 0);
+ volatile unsigned long * enable0, * enable1, * enable2, *enable3;
+ volatile unsigned long * target0, * target1, * target2, *target3;
+
+ enable0 = (unsigned long *) &pca->pca_int[0].enable;
+ enable1 = (unsigned long *) &pca->pca_int[1].enable;
+ enable2 = (unsigned long *) &pca->pca_int[2].enable;
+ enable3 = (unsigned long *) &pca->pca_int[3].enable;
+
+ target0 = (unsigned long *) &pca->pca_int[0].target;
+ target1 = (unsigned long *) &pca->pca_int[1].target;
+ target2 = (unsigned long *) &pca->pca_int[2].target;
+ target3 = (unsigned long *) &pca->pca_int[3].target;
+
+ *enable0 = *enable1 = *enable2 = *enable3 = 0;
+
+ *target0 = (1UL<<8) | WILDFIRE_QBB(0);
+ *target1 = *target2 = *target3 = 0;
+
+ mb();
+
+ *enable0; *enable1; *enable2; *enable3;
+ *target0; *target1; *target2; *target3;
+
+#else
+ int i;
+
+ doing_init_irq_hw = 1;
+
+ /* Need to update only once for every possible PCA. */
+ for (i = 0; i < WILDFIRE_NR_IRQS; i+=WILDFIRE_IRQ_PER_PCA)
+ wildfire_update_irq_hw(i);
+
+ doing_init_irq_hw = 0;
+#endif
+}
+
+static void
+wildfire_enable_irq(unsigned int irq)
+{
+ if (irq < 16)
+ i8259a_enable_irq(irq);
+
+ spin_lock(&wildfire_irq_lock);
+ set_bit(irq, &cached_irq_mask);
+ wildfire_update_irq_hw(irq);
+ spin_unlock(&wildfire_irq_lock);
+}
+
+static void
+wildfire_disable_irq(unsigned int irq)
+{
+ if (irq < 16)
+ i8259a_disable_irq(irq);
+
+ spin_lock(&wildfire_irq_lock);
+ clear_bit(irq, &cached_irq_mask);
+ wildfire_update_irq_hw(irq);
+ spin_unlock(&wildfire_irq_lock);
+}
+
+static void
+wildfire_mask_and_ack_irq(unsigned int irq)
+{
+ if (irq < 16)
+ i8259a_mask_and_ack_irq(irq);
+
+ spin_lock(&wildfire_irq_lock);
+ clear_bit(irq, &cached_irq_mask);
+ wildfire_update_irq_hw(irq);
+ spin_unlock(&wildfire_irq_lock);
+}
+
+static unsigned int
+wildfire_startup_irq(unsigned int irq)
+{
+ wildfire_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void
+wildfire_end_irq(unsigned int irq)
+{
+#if 0
+ if (!irq_desc[irq].action)
+ printk("got irq %d\n", irq);
+#endif
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ wildfire_enable_irq(irq);
+}
+
+static struct hw_interrupt_type wildfire_irq_type = {
+ .typename = "WILDFIRE",
+ .startup = wildfire_startup_irq,
+ .shutdown = wildfire_disable_irq,
+ .enable = wildfire_enable_irq,
+ .disable = wildfire_disable_irq,
+ .ack = wildfire_mask_and_ack_irq,
+ .end = wildfire_end_irq,
+};
+
+static void __init
+wildfire_init_irq_per_pca(int qbbno, int pcano)
+{
+ int i, irq_bias;
+ unsigned long io_bias;
+ static struct irqaction isa_enable = {
+ .handler = no_action,
+ .name = "isa_enable",
+ };
+
+ irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
+ + pcano * WILDFIRE_IRQ_PER_PCA;
+
+ /* Only need the following for first PCI bus per PCA. */
+ io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
+
+#if 0
+ outb(0, DMA1_RESET_REG + io_bias);
+ outb(0, DMA2_RESET_REG + io_bias);
+ outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
+ outb(0, DMA2_MASK_REG + io_bias);
+#endif
+
+#if 0
+ /* ??? Not sure how to do this, yet... */
+ init_i8259a_irqs(); /* ??? */
+#endif
+
+ for (i = 0; i < 16; ++i) {
+ if (i == 2)
+ continue;
+ irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i+irq_bias].handler = &wildfire_irq_type;
+ }
+
+ irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[36+irq_bias].handler = &wildfire_irq_type;
+ for (i = 40; i < 64; ++i) {
+ irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i+irq_bias].handler = &wildfire_irq_type;
+ }
+
+ setup_irq(32+irq_bias, &isa_enable);
+}
+
+static void __init
+wildfire_init_irq(void)
+{
+ int qbbno, pcano;
+
+#if 1
+ wildfire_init_irq_hw();
+ init_i8259a_irqs();
+#endif
+
+ for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
+ if (WILDFIRE_QBB_EXISTS(qbbno)) {
+ for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
+ if (WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
+ wildfire_init_irq_per_pca(qbbno, pcano);
+ }
+ }
+ }
+ }
+}
+
+static void
+wildfire_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq;
+
+ irq = (vector - 0x800) >> 4;
+
+ /*
+ * bits 10-8: source QBB ID
+ * bits 7-6: PCA
+ * bits 5-0: irq in PCA
+ */
+
+ handle_irq(irq, regs);
+ return;
+}
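+
+/*
+ * A sketch of the vector layout documented above, mirroring the
+ * arithmetic in wildfire_update_irq_hw(); the helpers are
+ * hypothetical and assume the WILDFIRE_* counts are powers of two
+ * (8 QBBs, 4 PCAs, 64 irqs per PCA).
+ */
+#if 0
+static inline int wildfire_irq_to_qbb(int irq)
+{
+	return (irq >> 8) & (WILDFIRE_MAX_QBB - 1);
+}
+static inline int wildfire_irq_to_pca(int irq)
+{
+	return (irq >> 6) & (WILDFIRE_PCA_PER_QBB - 1);
+}
+static inline int wildfire_irq_local(int irq)
+{
+	return irq & (WILDFIRE_IRQ_PER_PCA - 1);
+}
+#endif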
+
+/*
+ * PCI Fixup configuration.
+ *
+ * Summary per PCA (2 PCI or HIPPI buses):
+ *
+ * Bit Meaning
+ * 0-15 ISA
+ *
+ *32 ISA summary
+ *33 SMI
+ *34 NMI
+ *36 builtin QLogic SCSI (or slot 0 if no IO module)
+ *40 Interrupt Line A from slot 2 PCI0
+ *41 Interrupt Line B from slot 2 PCI0
+ *42 Interrupt Line C from slot 2 PCI0
+ *43 Interrupt Line D from slot 2 PCI0
+ *44 Interrupt Line A from slot 3 PCI0
+ *45 Interrupt Line B from slot 3 PCI0
+ *46 Interrupt Line C from slot 3 PCI0
+ *47 Interrupt Line D from slot 3 PCI0
+ *
+ *48 Interrupt Line A from slot 4 PCI1
+ *49 Interrupt Line B from slot 4 PCI1
+ *50 Interrupt Line C from slot 4 PCI1
+ *51 Interrupt Line D from slot 4 PCI1
+ *52 Interrupt Line A from slot 5 PCI1
+ *53 Interrupt Line B from slot 5 PCI1
+ *54 Interrupt Line C from slot 5 PCI1
+ *55 Interrupt Line D from slot 5 PCI1
+ *56 Interrupt Line A from slot 6 PCI1
+ *57 Interrupt Line B from slot 6 PCI1
+ *58 Interrupt Line C from slot 6 PCI1
+ *59 Interrupt Line D from slot 6 PCI1
+ *60 Interrupt Line A from slot 7 PCI1
+ *61 Interrupt Line B from slot 7 PCI1
+ *62 Interrupt Line C from slot 7 PCI1
+ *63 Interrupt Line D from slot 7 PCI1
+ *
+ *
+ * IdSel
+ * 0 Cypress Bridge I/O (ISA summary interrupt)
+ * 1 64 bit PCI 0 option slot 1 (SCSI QLogic builtin)
+ * 2 64 bit PCI 0 option slot 2
+ * 3 64 bit PCI 0 option slot 3
+ * 4 64 bit PCI 1 option slot 4
+ * 5 64 bit PCI 1 option slot 5
+ * 6 64 bit PCI 1 option slot 6
+ * 7 64 bit PCI 1 option slot 7
+ */
+
+static int __init
+wildfire_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[8][5] __initdata = {
+ /*INT INTA INTB INTC INTD */
+ { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */
+ { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
+ { 40, 40, 40+1, 40+2, 40+3}, /* IdSel 2 PCI 0 slot 2 */
+ { 44, 44, 44+1, 44+2, 44+3}, /* IdSel 3 PCI 0 slot 3 */
+ { 48, 48, 48+1, 48+2, 48+3}, /* IdSel 4 PCI 1 slot 4 */
+ { 52, 52, 52+1, 52+2, 52+3}, /* IdSel 5 PCI 1 slot 5 */
+ { 56, 56, 56+1, 56+2, 56+3}, /* IdSel 6 PCI 1 slot 6 */
+ { 60, 60, 60+1, 60+2, 60+3}, /* IdSel 7 PCI 1 slot 7 */
+ };
+ long min_idsel = 0, max_idsel = 7, irqs_per_slot = 5;
+
+ struct pci_controller *hose = dev->sysdata;
+ int irq = COMMON_TABLE_LOOKUP;
+
+ if (irq > 0) {
+ int qbbno = hose->index >> 3;
+ int pcano = (hose->index >> 1) & 3;
+ irq += (qbbno << 8) + (pcano << 6);
+ }
+ return irq;
+}
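+
+/*
+ * Example of the hose arithmetic above: hose->index 10 gives
+ * qbbno = 10 >> 3 = 1 and pcano = (10 >> 1) & 3 = 1, so a table
+ * result of 40 (slot 2, INTA) becomes 40 + (1 << 8) + (1 << 6) = 360.
+ */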
+
+
+/*
+ * The System Vectors
+ */
+
+struct alpha_machine_vector wildfire_mv __initmv = {
+ .vector_name = "WILDFIRE",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_WILDFIRE_IO,
+ .machine_check = wildfire_machine_check,
+ .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
+ .min_io_address = DEFAULT_IO_BASE,
+ .min_mem_address = DEFAULT_MEM_BASE,
+
+ .nr_irqs = WILDFIRE_NR_IRQS,
+ .device_interrupt = wildfire_device_interrupt,
+
+ .init_arch = wildfire_init_arch,
+ .init_irq = wildfire_init_irq,
+ .init_rtc = common_init_rtc,
+ .init_pci = common_init_pci,
+ .kill_arch = wildfire_kill_arch,
+ .pci_map_irq = wildfire_map_irq,
+ .pci_swizzle = common_swizzle,
+
+ .pa_to_nid = wildfire_pa_to_nid,
+ .cpuid_to_nid = wildfire_cpuid_to_nid,
+ .node_mem_start = wildfire_node_mem_start,
+ .node_mem_size = wildfire_node_mem_size,
+};
+ALIAS_MV(wildfire)
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
new file mode 100644
index 000000000000..faab8c2a03eb
--- /dev/null
+++ b/arch/alpha/kernel/systbls.S
@@ -0,0 +1,468 @@
+/*
+ * arch/alpha/kernel/systbls.S
+ *
+ * The system call table.
+ */
+
+#include <linux/config.h> /* CONFIG_OSF4_COMPAT */
+#include <asm/unistd.h>
+
+ .data
+ .align 3
+ .globl sys_call_table
+sys_call_table:
+ .quad alpha_ni_syscall /* 0 */
+ .quad sys_exit
+ .quad sys_fork
+ .quad sys_read
+ .quad sys_write
+ .quad alpha_ni_syscall /* 5 */
+ .quad sys_close
+ .quad osf_wait4
+ .quad alpha_ni_syscall
+ .quad sys_link
+ .quad sys_unlink /* 10 */
+ .quad alpha_ni_syscall
+ .quad sys_chdir
+ .quad sys_fchdir
+ .quad sys_mknod
+ .quad sys_chmod /* 15 */
+ .quad sys_chown
+ .quad osf_brk
+ .quad alpha_ni_syscall
+ .quad sys_lseek
+ .quad sys_getxpid /* 20 */
+ .quad osf_mount
+ .quad sys_umount
+ .quad sys_setuid
+ .quad sys_getxuid
+ .quad alpha_ni_syscall /* 25 */
+ .quad sys_ptrace
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 30 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad sys_access
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 35 */
+ .quad sys_sync
+ .quad sys_kill
+ .quad alpha_ni_syscall
+ .quad sys_setpgid
+ .quad alpha_ni_syscall /* 40 */
+ .quad sys_dup
+ .quad sys_pipe
+ .quad osf_set_program_attributes
+ .quad alpha_ni_syscall
+ .quad sys_open /* 45 */
+ .quad alpha_ni_syscall
+ .quad sys_getxgid
+ .quad osf_sigprocmask
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 50 */
+ .quad sys_acct
+ .quad sys_sigpending
+ .quad alpha_ni_syscall
+ .quad sys_ioctl
+ .quad alpha_ni_syscall /* 55 */
+ .quad alpha_ni_syscall
+ .quad sys_symlink
+ .quad sys_readlink
+ .quad sys_execve
+ .quad sys_umask /* 60 */
+ .quad sys_chroot
+ .quad alpha_ni_syscall
+ .quad sys_getpgrp
+ .quad sys_getpagesize
+ .quad alpha_ni_syscall /* 65 */
+ .quad sys_vfork
+ .quad sys_newstat
+ .quad sys_newlstat
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 70 */
+ .quad osf_mmap
+ .quad alpha_ni_syscall
+ .quad sys_munmap
+ .quad sys_mprotect
+ .quad sys_madvise /* 75 */
+ .quad sys_vhangup
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad sys_getgroups
+	.quad sys_setgroups			/* 80 */
+	.quad alpha_ni_syscall
+	/* map BSD's setpgrp (entry 82) to sys_setpgid for binary compatibility: */
+	.quad sys_setpgid
+ .quad osf_setitimer
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 85 */
+ .quad osf_getitimer
+ .quad sys_gethostname
+ .quad sys_sethostname
+ .quad sys_getdtablesize
+ .quad sys_dup2 /* 90 */
+ .quad sys_newfstat
+ .quad sys_fcntl
+ .quad osf_select
+ .quad sys_poll
+ .quad sys_fsync /* 95 */
+ .quad sys_setpriority
+ .quad sys_socket
+ .quad sys_connect
+ .quad sys_accept
+ .quad osf_getpriority /* 100 */
+ .quad sys_send
+ .quad sys_recv
+ .quad sys_sigreturn
+ .quad sys_bind
+ .quad sys_setsockopt /* 105 */
+ .quad sys_listen
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 110 */
+ .quad sys_sigsuspend
+ .quad osf_sigstack
+ .quad sys_recvmsg
+ .quad sys_sendmsg
+ .quad alpha_ni_syscall /* 115 */
+ .quad osf_gettimeofday
+ .quad osf_getrusage
+ .quad sys_getsockopt
+ .quad alpha_ni_syscall
+#ifdef CONFIG_OSF4_COMPAT
+ .quad osf_readv /* 120 */
+ .quad osf_writev
+#else
+ .quad sys_readv /* 120 */
+ .quad sys_writev
+#endif
+ .quad osf_settimeofday
+ .quad sys_fchown
+ .quad sys_fchmod
+ .quad sys_recvfrom /* 125 */
+ .quad sys_setreuid
+ .quad sys_setregid
+ .quad sys_rename
+ .quad sys_truncate
+ .quad sys_ftruncate /* 130 */
+ .quad sys_flock
+ .quad sys_setgid
+ .quad sys_sendto
+ .quad sys_shutdown
+ .quad sys_socketpair /* 135 */
+ .quad sys_mkdir
+ .quad sys_rmdir
+ .quad osf_utimes
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 140 */
+ .quad sys_getpeername
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad sys_getrlimit
+ .quad sys_setrlimit /* 145 */
+ .quad alpha_ni_syscall
+ .quad sys_setsid
+ .quad sys_quotactl
+ .quad alpha_ni_syscall
+ .quad sys_getsockname /* 150 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 155 */
+ .quad osf_sigaction
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad osf_getdirentries
+ .quad osf_statfs /* 160 */
+ .quad osf_fstatfs
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad osf_getdomainname /* 165 */
+ .quad sys_setdomainname
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 170 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 175 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 180 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 185 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 190 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 195 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ /* The OSF swapon has two extra arguments, but we ignore them. */
+ .quad sys_swapon
+ .quad sys_msgctl /* 200 */
+ .quad sys_msgget
+ .quad sys_msgrcv
+ .quad sys_msgsnd
+ .quad sys_semctl
+ .quad sys_semget /* 205 */
+ .quad sys_semop
+ .quad osf_utsname
+ .quad sys_lchown
+ .quad osf_shmat
+ .quad sys_shmctl /* 210 */
+ .quad sys_shmdt
+ .quad sys_shmget
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 215 */
+ .quad alpha_ni_syscall
+ .quad sys_msync
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 220 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 225 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 230 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad sys_getpgid
+ .quad sys_getsid
+ .quad sys_sigaltstack /* 235 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 240 */
+ .quad osf_sysinfo
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad osf_proplist_syscall
+ .quad alpha_ni_syscall /* 245 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 250 */
+ .quad osf_usleep_thread
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad sys_sysfs
+ .quad alpha_ni_syscall /* 255 */
+ .quad osf_getsysinfo
+ .quad osf_setsysinfo
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 260 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 265 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 270 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 275 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 280 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 285 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 290 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall /* 295 */
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+ .quad alpha_ni_syscall
+/* linux-specific system calls start at 300 */
+ .quad sys_bdflush /* 300 */
+ .quad sys_sethae
+ .quad sys_mount
+ .quad sys_old_adjtimex
+ .quad sys_swapoff
+ .quad sys_getdents /* 305 */
+ .quad sys_ni_syscall /* 306: old create_module */
+ .quad sys_init_module
+ .quad sys_delete_module
+ .quad sys_ni_syscall /* 309: old get_kernel_syms */
+ .quad sys_syslog /* 310 */
+ .quad sys_reboot
+ .quad sys_clone
+ .quad sys_uselib
+ .quad sys_mlock
+ .quad sys_munlock /* 315 */
+ .quad sys_mlockall
+ .quad sys_munlockall
+ .quad sys_sysinfo
+ .quad sys_sysctl
+ .quad sys_ni_syscall /* 320 */
+ .quad sys_oldumount
+ .quad sys_swapon
+ .quad sys_times
+ .quad sys_personality
+ .quad sys_setfsuid /* 325 */
+ .quad sys_setfsgid
+ .quad sys_ustat
+ .quad sys_statfs
+ .quad sys_fstatfs
+ .quad sys_sched_setparam /* 330 */
+ .quad sys_sched_getparam
+ .quad sys_sched_setscheduler
+ .quad sys_sched_getscheduler
+ .quad sys_sched_yield
+ .quad sys_sched_get_priority_max /* 335 */
+ .quad sys_sched_get_priority_min
+ .quad sys_sched_rr_get_interval
+ .quad sys_ni_syscall /* sys_afs_syscall */
+ .quad sys_newuname
+ .quad sys_nanosleep /* 340 */
+ .quad sys_mremap
+ .quad sys_nfsservctl
+ .quad sys_setresuid
+ .quad sys_getresuid
+ .quad sys_pciconfig_read /* 345 */
+ .quad sys_pciconfig_write
+ .quad sys_ni_syscall /* 347: old query_module */
+ .quad sys_prctl
+ .quad sys_pread64
+ .quad sys_pwrite64 /* 350 */
+ .quad sys_rt_sigreturn
+ .quad sys_rt_sigaction
+ .quad sys_rt_sigprocmask
+ .quad sys_rt_sigpending
+ .quad sys_rt_sigtimedwait /* 355 */
+ .quad sys_rt_sigqueueinfo
+ .quad sys_rt_sigsuspend
+ .quad sys_select
+ .quad sys_gettimeofday
+ .quad sys_settimeofday /* 360 */
+ .quad sys_getitimer
+ .quad sys_setitimer
+ .quad sys_utimes
+ .quad sys_getrusage
+ .quad sys_wait4 /* 365 */
+ .quad sys_adjtimex
+ .quad sys_getcwd
+ .quad sys_capget
+ .quad sys_capset
+ .quad sys_sendfile64 /* 370 */
+ .quad sys_setresgid
+ .quad sys_getresgid
+ .quad sys_ni_syscall /* sys_dipc */
+ .quad sys_pivot_root
+ .quad sys_mincore /* 375 */
+ .quad sys_pciconfig_iobase
+ .quad sys_getdents64
+ .quad sys_gettid
+ .quad sys_readahead
+ .quad sys_ni_syscall /* 380 */
+ .quad sys_tkill
+ .quad sys_setxattr
+ .quad sys_lsetxattr
+ .quad sys_fsetxattr
+ .quad sys_getxattr /* 385 */
+ .quad sys_lgetxattr
+ .quad sys_fgetxattr
+ .quad sys_listxattr
+ .quad sys_llistxattr
+ .quad sys_flistxattr /* 390 */
+ .quad sys_removexattr
+ .quad sys_lremovexattr
+ .quad sys_fremovexattr
+ .quad sys_futex
+ .quad sys_sched_setaffinity /* 395 */
+ .quad sys_sched_getaffinity
+ .quad sys_ni_syscall /* 397, tux */
+ .quad sys_io_setup
+ .quad sys_io_destroy
+ .quad sys_io_getevents /* 400 */
+ .quad sys_io_submit
+ .quad sys_io_cancel
+ .quad sys_ni_syscall /* 403, sys_alloc_hugepages */
+ .quad sys_ni_syscall /* 404, sys_free_hugepages */
+ .quad sys_exit_group /* 405 */
+ .quad sys_lookup_dcookie
+ .quad sys_epoll_create
+ .quad sys_epoll_ctl
+ .quad sys_epoll_wait
+ .quad sys_remap_file_pages /* 410 */
+ .quad sys_set_tid_address
+ .quad sys_restart_syscall
+ .quad sys_fadvise64
+ .quad sys_timer_create
+ .quad sys_timer_settime /* 415 */
+ .quad sys_timer_gettime
+ .quad sys_timer_getoverrun
+ .quad sys_timer_delete
+ .quad sys_clock_settime
+ .quad sys_clock_gettime /* 420 */
+ .quad sys_clock_getres
+ .quad sys_clock_nanosleep
+ .quad sys_semtimedop
+ .quad sys_tgkill
+ .quad sys_stat64 /* 425 */
+ .quad sys_lstat64
+ .quad sys_fstat64
+ .quad sys_ni_syscall /* sys_vserver */
+ .quad sys_ni_syscall /* sys_mbind */
+ .quad sys_ni_syscall /* sys_get_mempolicy */
+ .quad sys_ni_syscall /* sys_set_mempolicy */
+ .quad sys_mq_open
+ .quad sys_mq_unlink
+ .quad sys_mq_timedsend
+ .quad sys_mq_timedreceive /* 435 */
+ .quad sys_mq_notify
+ .quad sys_mq_getsetattr
+ .quad sys_waitid
+
+ .size sys_call_table, . - sys_call_table
+ .type sys_call_table, @object
+
+/* Remember to update everything, kids. */
+.ifne (. - sys_call_table) - (NR_SYSCALLS * 8)
+.err
+.endif
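+
+/* How the table is consumed: the syscall dispatch in entry.S
+   bounds-checks the syscall number against NR_SYSCALLS and jumps
+   through sys_call_table[n].  Every entry is a single 8-byte .quad,
+   so the .ifne guard above turns any mismatch between the table
+   length and NR_SYSCALLS into a hard build error. */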
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
new file mode 100644
index 000000000000..8226c5cd788c
--- /dev/null
+++ b/arch/alpha/kernel/time.c
@@ -0,0 +1,591 @@
+/*
+ * linux/arch/alpha/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26 Markus Kuhn
+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ * precision CMOS clock update
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1997-01-09 Adrian Sun
+ * use interval timer if CONFIG_RTC=y
+ * 1997-10-29 John Bowman (bowman@math.ualberta.ca)
+ * fixed tick loss calculation in timer_interrupt
+ * (round system clock to nearest tick instead of truncating)
+ * fixed algorithm in time_init for getting time from CMOS clock
+ * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net)
+ * fixed algorithm in do_gettimeofday() for calculating the precise time
+ * from processor cycle counter (now taking lost_ticks into account)
+ * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+ *	Fixed time_init to be aware of epochs != 1900. This prevents
+ * booting up in 2048 for me;) Code is stolen from rtc.c.
+ * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com>
+ * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bcd.h>
+#include <linux/profile.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/8253pit.h>
+
+#include <linux/mc146818rtc.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+extern unsigned long wall_jiffies; /* kernel/timer.c */
+
+static int set_rtc_mmss(unsigned long);
+
+DEFINE_SPINLOCK(rtc_lock);
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+/*
+ * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting
+ * by 48 gives us 16 bits for HZ while keeping the accuracy good even
+ * for large CPU clock rates.
+ */
+#define FIX_SHIFT 48
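+
+/*
+ * Worked example, assuming a 500 MHz clock with HZ == 1024:
+ * scaled_ticks_per_cycle = (1024 << 48) / 500000000 ~= 576460752;
+ * one tick's worth of cycles (~488281) times that is ~2^48, which
+ * the ">> FIX_SHIFT" in timer_interrupt() turns back into one tick.
+ */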
+
+/* lump static variables together for more efficient access: */
+static struct {
+ /* cycle counter last time it got invoked */
+ __u32 last_time;
+ /* ticks/cycle * 2^48 */
+ unsigned long scaled_ticks_per_cycle;
+ /* last time the CMOS clock got updated */
+ time_t last_rtc_update;
+ /* partial unused tick */
+ unsigned long partial_tick;
+} state;
+
+unsigned long est_cycle_freq;
+
+
+static inline __u32 rpcc(void)
+{
+ __u32 result;
+ asm volatile ("rpcc %0" : "=r"(result));
+ return result;
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ *
+ * Copied from ARM code for expediency... ;-}
+ */
+unsigned long long sched_clock(void)
+{
+ return (unsigned long long)jiffies * (1000000000 / HZ);
+}
+
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
+{
+ unsigned long delta;
+ __u32 now;
+ long nticks;
+
+#ifndef CONFIG_SMP
+ /* Not SMP, do kernel PC profiling here. */
+ profile_tick(CPU_PROFILING, regs);
+#endif
+
+ write_seqlock(&xtime_lock);
+
+ /*
+ * Calculate how many ticks have passed since the last update,
+ * including any previous partial leftover. Save any resulting
+ * fraction for the next pass.
+ */
+ now = rpcc();
+ delta = now - state.last_time;
+ state.last_time = now;
+ delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
+ state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
+ nticks = delta >> FIX_SHIFT;
+
+ while (nticks > 0) {
+ do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+ nticks--;
+ }
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
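+	/* Note: xtime.tv_nsec counts nanoseconds, so the 500000 +/-
+	   TICK_SIZE/2 bounds below describe a window ~0.5 ms into the
+	   second, not the 500 ms point named above; the constants look
+	   like stale microsecond values. */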
+ if ((time_status & STA_UNSYNC) == 0
+ && xtime.tv_sec > state.last_rtc_update + 660
+ && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
+ && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+ int tmp = set_rtc_mmss(xtime.tv_sec);
+ state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
+ }
+
+ write_sequnlock(&xtime_lock);
+ return IRQ_HANDLED;
+}
+
+void
+common_init_rtc(void)
+{
+ unsigned char x;
+
+ /* Reset periodic interrupt frequency. */
+ x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
+ /* Test includes known working values on various platforms
+ where 0x26 is wrong; we refuse to change those. */
+ if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
+ printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
+ CMOS_WRITE(0x26, RTC_FREQ_SELECT);
+ }
+
+ /* Turn on periodic interrupts. */
+ x = CMOS_READ(RTC_CONTROL);
+ if (!(x & RTC_PIE)) {
+ printk("Turning on RTC interrupts.\n");
+ x |= RTC_PIE;
+ x &= ~(RTC_AIE | RTC_UIE);
+ CMOS_WRITE(x, RTC_CONTROL);
+ }
+ (void) CMOS_READ(RTC_INTR_FLAGS);
+
+ outb(0x36, 0x43); /* pit counter 0: system timer */
+ outb(0x00, 0x40);
+ outb(0x00, 0x40);
+
+ outb(0xb6, 0x43); /* pit counter 2: speaker */
+ outb(0x31, 0x42);
+ outb(0x13, 0x42);
+
+ init_rtc_irq();
+}
+
+
+/* Validate a computed cycle counter result against the known bounds for
+ the given processor core. There's too much brokenness in the way of
+ timing hardware for any one method to work everywhere. :-(
+
+ Return 0 if the result cannot be trusted, otherwise return the argument. */
+
+static unsigned long __init
+validate_cc_value(unsigned long cc)
+{
+ static struct bounds {
+ unsigned int min, max;
+ } cpu_hz[] __initdata = {
+ [EV3_CPU] = { 50000000, 200000000 }, /* guess */
+ [EV4_CPU] = { 100000000, 300000000 },
+ [LCA4_CPU] = { 100000000, 300000000 }, /* guess */
+ [EV45_CPU] = { 200000000, 300000000 },
+ [EV5_CPU] = { 250000000, 433000000 },
+ [EV56_CPU] = { 333000000, 667000000 },
+ [PCA56_CPU] = { 400000000, 600000000 }, /* guess */
+ [PCA57_CPU] = { 500000000, 600000000 }, /* guess */
+ [EV6_CPU] = { 466000000, 600000000 },
+ [EV67_CPU] = { 600000000, 750000000 },
+ [EV68AL_CPU] = { 750000000, 940000000 },
+ [EV68CB_CPU] = { 1000000000, 1333333333 },
+ /* None of the following are shipping as of 2001-11-01. */
+ [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */
+ [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */
+ [EV7_CPU] = { 800000000, 1400000000 }, /* guess */
+ [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */
+ };
+
+ /* Allow for some drift in the crystal. 10MHz is more than enough. */
+ const unsigned int deviation = 10000000;
+
+ struct percpu_struct *cpu;
+ unsigned int index;
+
+ cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
+ index = cpu->type & 0xffffffff;
+
+ /* If index out of bounds, no way to validate. */
+ if (index >= sizeof(cpu_hz)/sizeof(cpu_hz[0]))
+ return cc;
+
+ /* If index contains no data, no way to validate. */
+ if (cpu_hz[index].max == 0)
+ return cc;
+
+ if (cc < cpu_hz[index].min - deviation
+ || cc > cpu_hz[index].max + deviation)
+ return 0;
+
+ return cc;
+}
+
+
+/*
+ * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
+ * arch/i386/time.c.
+ */
+
+#define CALIBRATE_LATCH 0xffff
+#define TIMEOUT_COUNT 0x100000
+
+static unsigned long __init
+calibrate_cc_with_pit(void)
+{
+ int cc, count = 0;
+
+ /* Set the Gate high, disable speaker */
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+ /*
+ * Now let's take care of CTC channel 2
+ *
+ * Set the Gate high, program CTC channel 2 for mode 0,
+ * (interrupt on terminal count mode), binary count,
+ * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+ */
+ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
+ outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
+ outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
+
+ cc = rpcc();
+ do {
+ count++;
+ } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
+ cc = rpcc() - cc;
+
+ /* Error: ECTCNEVERSET or ECPUTOOFAST. */
+ if (count <= 1 || count == TIMEOUT_COUNT)
+ return 0;
+
+ return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
+}
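+
+/*
+ * The conversion above: the PIT runs at PIT_TICK_RATE (~1193182 Hz),
+ * and CALIBRATE_LATCH+1 = 65536 counts take 65536/1193182 ~= 54.9 ms;
+ * cycles counted over that window, scaled by PIT_TICK_RATE / 65536,
+ * therefore give cycles per second.
+ */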
+
+/* The Linux interpretation of the CMOS clock register contents:
+ When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+ RTC registers show the second which has precisely just started.
+ Let's hope other operating systems interpret the RTC the same way. */
+
+static unsigned long __init
+rpcc_after_update_in_progress(void)
+{
+ do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
+ do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+
+ return rpcc();
+}
+
+void __init
+time_init(void)
+{
+ unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch;
+ unsigned long cycle_freq, tolerance;
+ long diff;
+
+ /* Calibrate CPU clock -- attempt #1. */
+ if (!est_cycle_freq)
+ est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
+
+ cc1 = rpcc_after_update_in_progress();
+
+ /* Calibrate CPU clock -- attempt #2. */
+ if (!est_cycle_freq) {
+ cc2 = rpcc_after_update_in_progress();
+ est_cycle_freq = validate_cc_value(cc2 - cc1);
+ cc1 = cc2;
+ }
+
+ cycle_freq = hwrpb->cycle_freq;
+ if (est_cycle_freq) {
+ /* If the given value is within 250 PPM of what we calculated,
+ accept it. Otherwise, use what we found. */
+ tolerance = cycle_freq / 4000;
+ diff = cycle_freq - est_cycle_freq;
+ if (diff < 0)
+ diff = -diff;
+ if ((unsigned long)diff > tolerance) {
+ cycle_freq = est_cycle_freq;
+ printk("HWRPB cycle frequency bogus. "
+ "Estimated %lu Hz\n", cycle_freq);
+ } else {
+ est_cycle_freq = 0;
+ }
+ } else if (! validate_cc_value (cycle_freq)) {
+ printk("HWRPB cycle frequency bogus, "
+ "and unable to estimate a proper value!\n");
+ }
+
+ /* From John Bowman <bowman@math.ualberta.ca>: allow the values
+ to settle, as the Update-In-Progress bit going low isn't good
+ enough on some hardware. 2ms is our guess; we haven't found
+	   bogomips yet, but this is close on a 500 MHz box. */
+ __delay(1000000);
+
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
+ }
+
+ /* PC-like is standard; used for year >= 70 */
+ epoch = 1900;
+ if (year < 20)
+ epoch = 2000;
+ else if (year >= 20 && year < 48)
+ /* NT epoch */
+ epoch = 1980;
+ else if (year >= 48 && year < 70)
+ /* Digital UNIX epoch */
+ epoch = 1952;
+
+ printk(KERN_INFO "Using epoch = %d\n", epoch);
+
+ if ((year += epoch) < 1970)
+ year += 100;
+
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_nsec = 0;
+
+ wall_to_monotonic.tv_sec -= xtime.tv_sec;
+ wall_to_monotonic.tv_nsec = 0;
+
+	if (HZ > (1<<16)) {
+		/* Deliberately undefined: fail at link time if HZ
+		   overflows the 16 bits that FIX_SHIFT leaves for it. */
+		extern void __you_lose (void);
+		__you_lose();
+	}
+
+ state.last_time = cc1;
+ state.scaled_ticks_per_cycle
+ = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
+ state.last_rtc_update = 0;
+ state.partial_tick = 0L;
+
+ /* Startup the timer source. */
+ alpha_mv.init_rtc();
+}
+
+/*
+ * Use the cycle counter to estimate a displacement from the last time
+ * tick. Unfortunately the Alpha designers made only the low 32 bits of
+ * the cycle counter active, so it overflows every ~8.2 seconds on a
+ * 500 MHz part. So we can't do the "find absolute time in terms of cycles" thing
+ * that the other ports do.
+ */
+void
+do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ unsigned long sec, usec, lost, seq;
+ unsigned long delta_cycles, delta_usec, partial_tick;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+
+ delta_cycles = rpcc() - state.last_time;
+ sec = xtime.tv_sec;
+ usec = (xtime.tv_nsec / 1000);
+ partial_tick = state.partial_tick;
+ lost = jiffies - wall_jiffies;
+
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+#ifdef CONFIG_SMP
+ /* Until and unless we figure out how to get cpu cycle counters
+ in sync and keep them there, we can't use the rpcc tricks. */
+ delta_usec = lost * (1000000 / HZ);
+#else
+ /*
+ * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
+ * = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
+ * = cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
+ *
+ * which, given a 600MHz cycle and a 1024Hz tick, has a
+ * dynamic range of about 1.7e17, which is less than the
+ * 1.8e19 in an unsigned long, so we are safe from overflow.
+ *
+ * Round, but with .5 up always, since .5 to even is harder
+ * with no clear gain.
+ */
+
+ delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
+ + partial_tick
+ + (lost << FIX_SHIFT)) * 15625;
+ delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
+#endif
+
+ usec += delta_usec;
+ if (usec >= 1000000) {
+ sec += 1;
+ usec -= 1000000;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
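+/*
+ * Worked example of the scaling above (a sketch): with HZ = 1024 and
+ * a 500 MHz clock, scaled_ticks_per_cycle = (1024 << 48) / 500000000
+ * = 576460752.  For delta_cycles = 500000 (1 ms), the sum scaled by
+ * 15625 is about 4.5e18 -- still below the 1.8e19 limit -- and the
+ * final division by 2**42 * HZ (with round-half-up) yields 1000 usec,
+ * as expected.
+ */
+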
+int
+do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+ unsigned long delta_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+
+ /* The offset that is added into time in do_gettimeofday above
+ must be subtracted out here to keep a coherent view of the
+ time. Without this, a full-tick error is possible. */
+
+#ifdef CONFIG_SMP
+ delta_nsec = (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ);
+#else
+ delta_nsec = rpcc() - state.last_time;
+ delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
+ + state.partial_tick
+ + ((jiffies - wall_jiffies) << FIX_SHIFT)) * 15625;
+ delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
+ delta_nsec *= 1000;
+#endif
+
+ nsec -= delta_nsec;
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ * sets the minutes. Usually you won't notice until after reboot!
+ */
+
+
+static int
+set_rtc_mmss(unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ /* irq are locally disabled here */
+ spin_lock(&rtc_lock);
+ /* Tell the clock it's being set */
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ /* Stop and reset prescaler */
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ BCD_TO_BIN(cmos_minutes);
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
+ /* correct for half hour time zone */
+ real_minutes += 30;
+ }
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
+ }
+ CMOS_WRITE(real_seconds,RTC_SECONDS);
+ CMOS_WRITE(real_minutes,RTC_MINUTES);
+ } else {
+ printk(KERN_WARNING
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
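+
+/*
+ * Worked example of the half-hour correction above (a sketch): if the
+ * RTC runs on a zone offset by 30 minutes (e.g. UTC+5:30), we might
+ * see cmos_minutes = 10 while nowtime gives real_minutes = 40.  Then
+ * abs(40 - 10) = 30, (30 + 15) / 30 = 1 is odd, so 30 is added and
+ * real_minutes becomes 10 (mod 60) -- the write goes ahead and only
+ * the zone offset is preserved.  A plain 2-minute drift (say 12 vs
+ * 10) leaves the test even and is written back directly.
+ */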
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
new file mode 100644
index 000000000000..fd7bd17cc960
--- /dev/null
+++ b/arch/alpha/kernel/traps.c
@@ -0,0 +1,1092 @@
+/*
+ * arch/alpha/kernel/traps.c
+ *
+ * (C) Copyright 1994 Linus Torvalds
+ */
+
+/*
+ * This file initializes the trap entry points
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+
+#include <asm/gentrap.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <asm/sysinfo.h>
+#include <asm/hwrpb.h>
+#include <asm/mmu_context.h>
+
+#include "proto.h"
+
+/* Work-around for some SRMs which mishandle opDEC faults. */
+
+static int opDEC_fix;
+
+static void __init
+opDEC_check(void)
+{
+ __asm__ __volatile__ (
+ /* Load the address of... */
+ " br $16, 1f\n"
+ /* A stub instruction fault handler. Just add 4 to the
+ pc and continue. */
+ " ldq $16, 8($sp)\n"
+ " addq $16, 4, $16\n"
+ " stq $16, 8($sp)\n"
+ " call_pal %[rti]\n"
+ /* Install the instruction fault handler. */
+ "1: lda $17, 3\n"
+ " call_pal %[wrent]\n"
+ /* With that in place, the fault from the round-to-minf fp
+ insn will arrive either at the "lda 4" insn (bad) or one
+ past that (good). This places the correct fixup in %0. */
+ " lda %[fix], 0\n"
+ " cvttq/svm $f31,$f31\n"
+ " lda %[fix], 4"
+ : [fix] "=r" (opDEC_fix)
+ : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
+ : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
+
+ if (opDEC_fix)
+ printk("opDEC fixup enabled.\n");
+}
+
+void
+dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
+{
+ printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
+ regs->pc, regs->r26, regs->ps, print_tainted());
+ print_symbol("pc is at %s\n", regs->pc);
+	print_symbol("ra is at %s\n", regs->r26);
+ printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
+ regs->r0, regs->r1, regs->r2);
+ printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
+ regs->r3, regs->r4, regs->r5);
+ printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
+ regs->r6, regs->r7, regs->r8);
+
+ if (r9_15) {
+ printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
+ r9_15[9], r9_15[10], r9_15[11]);
+ printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
+ r9_15[12], r9_15[13], r9_15[14]);
+ printk("s6 = %016lx\n", r9_15[15]);
+ }
+
+ printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
+ regs->r16, regs->r17, regs->r18);
+ printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
+ regs->r19, regs->r20, regs->r21);
+ printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
+ regs->r22, regs->r23, regs->r24);
+ printk("t11= %016lx pv = %016lx at = %016lx\n",
+ regs->r25, regs->r27, regs->r28);
+ printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
+#if 0
+__halt();
+#endif
+}
+
+#if 0
+static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
+#endif
+
+static void
+dik_show_code(unsigned int *pc)
+{
+ long i;
+
+ printk("Code:");
+ for (i = -6; i < 2; i++) {
+ unsigned int insn;
+ if (__get_user(insn, (unsigned int __user *)pc + i))
+ break;
+ printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
+ }
+ printk("\n");
+}
+
+static void
+dik_show_trace(unsigned long *sp)
+{
+ long i = 0;
+ printk("Trace:\n");
+ while (0x1ff8 & (unsigned long) sp) {
+ extern char _stext[], _etext[];
+ unsigned long tmp = *sp;
+ sp++;
+ if (tmp < (unsigned long) &_stext)
+ continue;
+ if (tmp >= (unsigned long) &_etext)
+ continue;
+ printk("[<%lx>]", tmp);
+ print_symbol(" %s", tmp);
+ printk("\n");
+		if (++i > 40) {
+ printk(" ...");
+ break;
+ }
+ }
+ printk("\n");
+}
+
+static int kstack_depth_to_print = 24;
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ unsigned long *stack;
+ int i;
+
+ /*
+ * debugging aid: "show_stack(NULL);" prints the
+ * back trace for this cpu.
+ */
+	if (sp == NULL)
+		sp = (unsigned long *)&sp;
+
+	stack = sp;
+	for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((long) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if (i && ((i % 4) == 0))
+ printk("\n ");
+ printk("%016lx ", *stack++);
+ }
+ printk("\n");
+ dik_show_trace(sp);
+}
+
+void dump_stack(void)
+{
+ show_stack(NULL, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void
+die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
+{
+ if (regs->ps & 8)
+ return;
+#ifdef CONFIG_SMP
+ printk("CPU %d ", hard_smp_processor_id());
+#endif
+ printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
+ dik_show_regs(regs, r9_15);
+ dik_show_trace((unsigned long *)(regs+1));
+ dik_show_code((unsigned int *)regs->pc);
+
+ if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
+ printk("die_if_kernel recursion detected.\n");
+ local_irq_enable();
+ while (1);
+ }
+ do_exit(SIGSEGV);
+}
+
+#ifndef CONFIG_MATHEMU
+static long dummy_emul(void) { return 0; }
+long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
+ = (void *)dummy_emul;
+long (*alpha_fp_emul) (unsigned long pc)
+ = (void *)dummy_emul;
+#else
+long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
+long alpha_fp_emul (unsigned long pc);
+#endif
+
+asmlinkage void
+do_entArith(unsigned long summary, unsigned long write_mask,
+ struct pt_regs *regs)
+{
+ long si_code = FPE_FLTINV;
+ siginfo_t info;
+
+ if (summary & 1) {
+ /* Software-completion summary bit is set, so try to
+ emulate the instruction. If the processor supports
+ precise exceptions, we don't have to search. */
+ if (!amask(AMASK_PRECISE_TRAP))
+ si_code = alpha_fp_emul(regs->pc - 4);
+ else
+ si_code = alpha_fp_emul_imprecise(regs, write_mask);
+ if (si_code == 0)
+ return;
+ }
+ die_if_kernel("Arithmetic fault", regs, 0, NULL);
+
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *) regs->pc;
+ send_sig_info(SIGFPE, &info, current);
+}
+
+asmlinkage void
+do_entIF(unsigned long type, struct pt_regs *regs)
+{
+ siginfo_t info;
+ int signo, code;
+
+ if (regs->ps == 0) {
+ if (type == 1) {
+ const unsigned int *data
+ = (const unsigned int *) regs->pc;
+ printk("Kernel bug at %s:%d\n",
+ (const char *)(data[1] | (long)data[2] << 32),
+ data[0]);
+ }
+ die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
+ regs, type, NULL);
+ }
+
+ switch (type) {
+ case 0: /* breakpoint */
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_trapno = 0;
+ info.si_addr = (void __user *) regs->pc;
+
+ if (ptrace_cancel_bpt(current)) {
+ regs->pc -= 4; /* make pc point to former bpt */
+ }
+
+ send_sig_info(SIGTRAP, &info, current);
+ return;
+
+ case 1: /* bugcheck */
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = (void __user *) regs->pc;
+ info.si_trapno = 0;
+ send_sig_info(SIGTRAP, &info, current);
+ return;
+
+ case 2: /* gentrap */
+ info.si_addr = (void __user *) regs->pc;
+ info.si_trapno = regs->r16;
+ switch ((long) regs->r16) {
+ case GEN_INTOVF:
+ signo = SIGFPE;
+ code = FPE_INTOVF;
+ break;
+ case GEN_INTDIV:
+ signo = SIGFPE;
+ code = FPE_INTDIV;
+ break;
+ case GEN_FLTOVF:
+ signo = SIGFPE;
+ code = FPE_FLTOVF;
+ break;
+ case GEN_FLTDIV:
+ signo = SIGFPE;
+ code = FPE_FLTDIV;
+ break;
+ case GEN_FLTUND:
+ signo = SIGFPE;
+ code = FPE_FLTUND;
+ break;
+ case GEN_FLTINV:
+ signo = SIGFPE;
+ code = FPE_FLTINV;
+ break;
+ case GEN_FLTINE:
+ signo = SIGFPE;
+ code = FPE_FLTRES;
+ break;
+ case GEN_ROPRAND:
+ signo = SIGFPE;
+ code = __SI_FAULT;
+ break;
+
+ case GEN_DECOVF:
+ case GEN_DECDIV:
+ case GEN_DECINV:
+ case GEN_ASSERTERR:
+ case GEN_NULPTRERR:
+ case GEN_STKOVF:
+ case GEN_STRLENERR:
+ case GEN_SUBSTRERR:
+ case GEN_RANGERR:
+ case GEN_SUBRNG:
+ case GEN_SUBRNG1:
+ case GEN_SUBRNG2:
+ case GEN_SUBRNG3:
+ case GEN_SUBRNG4:
+ case GEN_SUBRNG5:
+ case GEN_SUBRNG6:
+ case GEN_SUBRNG7:
+ default:
+ signo = SIGTRAP;
+ code = __SI_FAULT;
+ break;
+ }
+
+ info.si_signo = signo;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = (void __user *) regs->pc;
+ send_sig_info(signo, &info, current);
+ return;
+
+ case 4: /* opDEC */
+ if (implver() == IMPLVER_EV4) {
+ long si_code;
+
+			/* Some versions of SRM do not handle
+			   opDEC properly - they return the PC of the
+			   opDEC fault, not the instruction after, as the
+ Alpha architecture requires. Here we fix it up.
+ We do this by intentionally causing an opDEC
+ fault during the boot sequence and testing if
+ we get the correct PC. If not, we set a flag
+ to correct it every time through. */
+ regs->pc += opDEC_fix;
+
+ /* EV4 does not implement anything except normal
+ rounding. Everything else will come here as
+ an illegal instruction. Emulate them. */
+ si_code = alpha_fp_emul(regs->pc - 4);
+ if (si_code == 0)
+ return;
+ if (si_code > 0) {
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *) regs->pc;
+ send_sig_info(SIGFPE, &info, current);
+ return;
+ }
+ }
+ break;
+
+ case 3: /* FEN fault */
+ /* Irritating users can call PAL_clrfen to disable the
+ FPU for the process. The kernel will then trap in
+ do_switch_stack and undo_switch_stack when we try
+ to save and restore the FP registers.
+
+ Given that GCC by default generates code that uses the
+ FP registers, PAL_clrfen is not useful except for DoS
+ attacks. So turn the bleeding FPU back on and be done
+ with it. */
+ current_thread_info()->pcb.flags |= 1;
+ __reload_thread(&current_thread_info()->pcb);
+ return;
+
+ case 5: /* illoc */
+ default: /* unexpected instruction-fault type */
+ ;
+ }
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = (void __user *) regs->pc;
+ send_sig_info(SIGILL, &info, current);
+}
+
+/* There is an ifdef in the PALcode in MILO that enables a
+ "kernel debugging entry point" as an unprivileged call_pal.
+
+ We don't want to have anything to do with it, but unfortunately
+ several versions of MILO included in distributions have it enabled,
+ and if we don't put something on the entry point we'll oops. */
+
+asmlinkage void
+do_entDbg(struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ die_if_kernel("Instruction fault", regs, 0, NULL);
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = (void __user *) regs->pc;
+ force_sig_info(SIGILL, &info, current);
+}
+
+
+/*
+ * entUna has a different register layout, chosen to be reasonably simple. It
+ * needs access to all the integer registers (the kernel doesn't use
+ * fp-regs), and it needs to have them in order for simpler access.
+ *
+ * Due to the non-standard register layout (and because we don't want
+ * to handle floating-point regs), user-mode unaligned accesses are
+ * handled separately by do_entUnaUser below.
+ *
+ * Oh, btw, we don't handle the "gp" register correctly, but if we fault
+ * on a gp-register unaligned load/store, something is _very_ wrong
+ * in the kernel anyway..
+ */
+struct allregs {
+ unsigned long regs[32];
+ unsigned long ps, pc, gp, a0, a1, a2;
+};
+
+struct unaligned_stat {
+ unsigned long count, va, pc;
+} unaligned[2];
+
+
+/* Macro for exception fixup code to access integer registers. */
+#define una_reg(r) (regs.regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
+
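+/* For example, una_reg(16) expands to regs.regs[35]: given the layout
+   of struct allregs (32 integer slots followed by ps, pc, gp, a0, a1,
+   a2, all unsigned long), index 35 is the a0 slot, i.e. the trap-time
+   value of r16 saved by the entry code.  All other register numbers
+   index regs.regs[] directly. */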
+
+asmlinkage void
+do_entUna(void * va, unsigned long opcode, unsigned long reg,
+ unsigned long a3, unsigned long a4, unsigned long a5,
+ struct allregs regs)
+{
+ long error, tmp1, tmp2, tmp3, tmp4;
+ unsigned long pc = regs.pc - 4;
+ const struct exception_table_entry *fixup;
+
+ unaligned[0].count++;
+ unaligned[0].va = (unsigned long) va;
+ unaligned[0].pc = pc;
+
+ /* We don't want to use the generic get/put unaligned macros as
+ we want to trap exceptions. Only if we actually get an
+ exception will we decide whether we should have caught it. */
+
+ switch (opcode) {
+ case 0x0c: /* ldwu */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,1(%3)\n"
+ " extwl %1,%3,%1\n"
+ " extwh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto got_exception;
+ una_reg(reg) = tmp1|tmp2;
+ return;
+
+ case 0x28: /* ldl */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,3(%3)\n"
+ " extll %1,%3,%1\n"
+ " extlh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto got_exception;
+ una_reg(reg) = (int)(tmp1|tmp2);
+ return;
+
+ case 0x29: /* ldq */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,7(%3)\n"
+ " extql %1,%3,%1\n"
+ " extqh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto got_exception;
+ una_reg(reg) = tmp1|tmp2;
+ return;
+
+ /* Note that the store sequences do not indicate that they change
+	   memory because they _should_ affect nothing in this context.
+ (Otherwise we have other, much larger, problems.) */
+ case 0x0d: /* stw */
+ __asm__ __volatile__(
+ "1: ldq_u %2,1(%5)\n"
+ "2: ldq_u %1,0(%5)\n"
+ " inswh %6,%5,%4\n"
+ " inswl %6,%5,%3\n"
+ " mskwh %2,%5,%2\n"
+ " mskwl %1,%5,%1\n"
+ " or %2,%4,%2\n"
+ " or %1,%3,%1\n"
+ "3: stq_u %2,1(%5)\n"
+ "4: stq_u %1,0(%5)\n"
+ "5:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %2,5b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %1,5b-2b(%0)\n"
+ " .long 3b - .\n"
+ " lda $31,5b-3b(%0)\n"
+ " .long 4b - .\n"
+ " lda $31,5b-4b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+ "=&r"(tmp3), "=&r"(tmp4)
+ : "r"(va), "r"(una_reg(reg)), "0"(0));
+ if (error)
+ goto got_exception;
+ return;
+
+ case 0x2c: /* stl */
+ __asm__ __volatile__(
+ "1: ldq_u %2,3(%5)\n"
+ "2: ldq_u %1,0(%5)\n"
+ " inslh %6,%5,%4\n"
+ " insll %6,%5,%3\n"
+ " msklh %2,%5,%2\n"
+ " mskll %1,%5,%1\n"
+ " or %2,%4,%2\n"
+ " or %1,%3,%1\n"
+ "3: stq_u %2,3(%5)\n"
+ "4: stq_u %1,0(%5)\n"
+ "5:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %2,5b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %1,5b-2b(%0)\n"
+ " .long 3b - .\n"
+ " lda $31,5b-3b(%0)\n"
+ " .long 4b - .\n"
+ " lda $31,5b-4b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+ "=&r"(tmp3), "=&r"(tmp4)
+ : "r"(va), "r"(una_reg(reg)), "0"(0));
+ if (error)
+ goto got_exception;
+ return;
+
+ case 0x2d: /* stq */
+ __asm__ __volatile__(
+ "1: ldq_u %2,7(%5)\n"
+ "2: ldq_u %1,0(%5)\n"
+ " insqh %6,%5,%4\n"
+ " insql %6,%5,%3\n"
+ " mskqh %2,%5,%2\n"
+ " mskql %1,%5,%1\n"
+ " or %2,%4,%2\n"
+ " or %1,%3,%1\n"
+ "3: stq_u %2,7(%5)\n"
+ "4: stq_u %1,0(%5)\n"
+ "5:\n"
+ ".section __ex_table,\"a\"\n\t"
+ " .long 1b - .\n"
+ " lda %2,5b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %1,5b-2b(%0)\n"
+ " .long 3b - .\n"
+ " lda $31,5b-3b(%0)\n"
+ " .long 4b - .\n"
+ " lda $31,5b-4b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+ "=&r"(tmp3), "=&r"(tmp4)
+ : "r"(va), "r"(una_reg(reg)), "0"(0));
+ if (error)
+ goto got_exception;
+ return;
+ }
+
+ lock_kernel();
+ printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n",
+ pc, va, opcode, reg);
+ do_exit(SIGSEGV);
+
+got_exception:
+ /* Ok, we caught the exception, but we don't want it. Is there
+ someone to pass it along to? */
+ if ((fixup = search_exception_tables(pc)) != 0) {
+ unsigned long newpc;
+ newpc = fixup_exception(una_reg, fixup, pc);
+
+ printk("Forwarding unaligned exception at %lx (%lx)\n",
+ pc, newpc);
+
+ (&regs)->pc = newpc;
+ return;
+ }
+
+ /*
+ * Yikes! No one to forward the exception to.
+ * Since the registers are in a weird format, dump them ourselves.
+ */
+ lock_kernel();
+
+ printk("%s(%d): unhandled unaligned exception\n",
+ current->comm, current->pid);
+
+ printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
+ pc, una_reg(26), regs.ps);
+ printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
+ una_reg(0), una_reg(1), una_reg(2));
+ printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
+ una_reg(3), una_reg(4), una_reg(5));
+ printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
+ una_reg(6), una_reg(7), una_reg(8));
+ printk("r9 = %016lx r10= %016lx r11= %016lx\n",
+ una_reg(9), una_reg(10), una_reg(11));
+ printk("r12= %016lx r13= %016lx r14= %016lx\n",
+ una_reg(12), una_reg(13), una_reg(14));
+ printk("r15= %016lx\n", una_reg(15));
+ printk("r16= %016lx r17= %016lx r18= %016lx\n",
+ una_reg(16), una_reg(17), una_reg(18));
+ printk("r19= %016lx r20= %016lx r21= %016lx\n",
+ una_reg(19), una_reg(20), una_reg(21));
+ printk("r22= %016lx r23= %016lx r24= %016lx\n",
+ una_reg(22), una_reg(23), una_reg(24));
+ printk("r25= %016lx r27= %016lx r28= %016lx\n",
+ una_reg(25), una_reg(27), una_reg(28));
+ printk("gp = %016lx sp = %p\n", regs.gp, &regs+1);
+
+ dik_show_code((unsigned int *)pc);
+ dik_show_trace((unsigned long *)(&regs+1));
+
+ if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
+ printk("die_if_kernel recursion detected.\n");
+ local_irq_enable();
+ while (1);
+ }
+ do_exit(SIGSEGV);
+}
+
+/*
+ * Convert an s-floating point value in memory format to the
+ * corresponding value in register format. The exponent
+ * needs to be remapped to preserve non-finite values
+ * (infinities, not-a-numbers, denormals).
+ */
+static inline unsigned long
+s_mem_to_reg (unsigned long s_mem)
+{
+ unsigned long frac = (s_mem >> 0) & 0x7fffff;
+ unsigned long sign = (s_mem >> 31) & 0x1;
+ unsigned long exp_msb = (s_mem >> 30) & 0x1;
+ unsigned long exp_low = (s_mem >> 23) & 0x7f;
+ unsigned long exp;
+
+ exp = (exp_msb << 10) | exp_low; /* common case */
+ if (exp_msb) {
+ if (exp_low == 0x7f) {
+ exp = 0x7ff;
+ }
+ } else {
+ if (exp_low == 0x00) {
+ exp = 0x000;
+ } else {
+ exp |= (0x7 << 7);
+ }
+ }
+ return (sign << 63) | (exp << 52) | (frac << 29);
+}
+
+/*
+ * Convert an s-floating point value in register format to the
+ * corresponding value in memory format.
+ */
+static inline unsigned long
+s_reg_to_mem (unsigned long s_reg)
+{
+ return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
+}
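+
+/* Round-trip sketch: the s-float 1.0f (memory pattern 0x3f800000) has
+   sign 0, exp_msb 0, exp_low 0x7f, frac 0; the low-exponent branch
+   ORs in 0x7 << 7, giving exp 0x3ff, so s_mem_to_reg() returns
+   0x3ff0000000000000 -- the register (double) pattern for 1.0.
+   s_reg_to_mem() inverts this: (reg >> 62) << 30 restores the sign
+   and exponent MSB, and (reg << 5) >> 34 recovers the low seven
+   exponent bits and the 23 fraction bits (the three redundant middle
+   exponent bits are dropped), yielding 0x3f800000 again. */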
+
+/*
+ * Handle user-level unaligned fault. Handling user-level unaligned
+ * faults is *extremely* slow and produces nasty messages. A user
+ * program *should* fix unaligned faults ASAP.
+ *
+ * Notice that we have (almost) the regular kernel stack layout here,
+ * so finding the appropriate registers is a little more difficult
+ * than in the kernel case.
+ *
+ * Finally, we handle regular integer load/stores only. In
+ * particular, load-linked/store-conditionally and floating point
+ * load/stores are not supported. The former make no sense with
+ * unaligned faults (they are guaranteed to fail) and I don't think
+ * the latter will occur in any decent program.
+ *
+ * Sigh. We *do* have to handle some FP operations, because GCC will
+ * use them as temporary storage for integer memory-to-memory copies.
+ * However, we need to deal with stt/ldt and sts/lds only.
+ */
+
+#define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \
+ | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \
+ | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \
+ | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
+
+#define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \
+ | 1L << 0x2c | 1L << 0x2d /* stl stq */ \
+ | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
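+
+/* Membership test sketch: "(1L << opcode) & OP_INT_MASK".  Opcode 0x28
+   (ldl) has its bit set in OP_INT_MASK, so it is handled as an integer
+   load below; opcode 0x23 (ldt) does not, so it takes the FP path. */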
+
+#define R(x) ((size_t) &((struct pt_regs *)0)->x)
+
+static int unauser_reg_offsets[32] = {
+ R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
+ /* r9 ... r15 are stored in front of regs. */
+ -56, -48, -40, -32, -24, -16, -8,
+ R(r16), R(r17), R(r18),
+ R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
+ R(r27), R(r28), R(gp),
+ 0, 0
+};
+
+#undef R
+
+asmlinkage void
+do_entUnaUser(void __user * va, unsigned long opcode,
+ unsigned long reg, struct pt_regs *regs)
+{
+ static int cnt = 0;
+ static long last_time = 0;
+
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long fake_reg, *reg_addr = &fake_reg;
+ siginfo_t info;
+ long error;
+
+ /* Check the UAC bits to decide what the user wants us to do
+	   with the unaligned access. */
+
+ if (!test_thread_flag (TIF_UAC_NOPRINT)) {
+ if (cnt >= 5 && jiffies - last_time > 5*HZ) {
+ cnt = 0;
+ }
+ if (++cnt < 5) {
+ printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
+ current->comm, current->pid,
+ regs->pc - 4, va, opcode, reg);
+ }
+ last_time = jiffies;
+ }
+ if (test_thread_flag (TIF_UAC_SIGBUS))
+ goto give_sigbus;
+ /* Not sure why you'd want to use this, but... */
+ if (test_thread_flag (TIF_UAC_NOFIX))
+ return;
+
+ /* Don't bother reading ds in the access check since we already
+ know that this came from the user. Also rely on the fact that
+ the page at TASK_SIZE is unmapped and so can't be touched anyway. */
+ if (!__access_ok((unsigned long)va, 0, USER_DS))
+ goto give_sigsegv;
+
+ ++unaligned[1].count;
+ unaligned[1].va = (unsigned long)va;
+ unaligned[1].pc = regs->pc - 4;
+
+ if ((1L << opcode) & OP_INT_MASK) {
+ /* it's an integer load/store */
+ if (reg < 30) {
+ reg_addr = (unsigned long *)
+ ((char *)regs + unauser_reg_offsets[reg]);
+ } else if (reg == 30) {
+ /* usp in PAL regs */
+ fake_reg = rdusp();
+ } else {
+ /* zero "register" */
+ fake_reg = 0;
+ }
+ }
+
+ /* We don't want to use the generic get/put unaligned macros as
+ we want to trap exceptions. Only if we actually get an
+ exception will we decide whether we should have caught it. */
+
+ switch (opcode) {
+ case 0x0c: /* ldwu */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,1(%3)\n"
+ " extwl %1,%3,%1\n"
+ " extwh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ *reg_addr = tmp1|tmp2;
+ break;
+
+ case 0x22: /* lds */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,3(%3)\n"
+ " extll %1,%3,%1\n"
+ " extlh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
+ return;
+
+ case 0x23: /* ldt */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,7(%3)\n"
+ " extql %1,%3,%1\n"
+ " extqh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ alpha_write_fp_reg(reg, tmp1|tmp2);
+ return;
+
+ case 0x28: /* ldl */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,3(%3)\n"
+ " extll %1,%3,%1\n"
+ " extlh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ *reg_addr = (int)(tmp1|tmp2);
+ break;
+
+ case 0x29: /* ldq */
+ __asm__ __volatile__(
+ "1: ldq_u %1,0(%3)\n"
+ "2: ldq_u %2,7(%3)\n"
+ " extql %1,%3,%1\n"
+ " extqh %2,%3,%2\n"
+ "3:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %1,3b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %2,3b-2b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
+ : "r"(va), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ *reg_addr = tmp1|tmp2;
+ break;
+
+ /* Note that the store sequences do not indicate that they change
+	   memory because they _should_ affect nothing in this context.
+ (Otherwise we have other, much larger, problems.) */
+ case 0x0d: /* stw */
+ __asm__ __volatile__(
+ "1: ldq_u %2,1(%5)\n"
+ "2: ldq_u %1,0(%5)\n"
+ " inswh %6,%5,%4\n"
+ " inswl %6,%5,%3\n"
+ " mskwh %2,%5,%2\n"
+ " mskwl %1,%5,%1\n"
+ " or %2,%4,%2\n"
+ " or %1,%3,%1\n"
+ "3: stq_u %2,1(%5)\n"
+ "4: stq_u %1,0(%5)\n"
+ "5:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %2,5b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %1,5b-2b(%0)\n"
+ " .long 3b - .\n"
+ " lda $31,5b-3b(%0)\n"
+ " .long 4b - .\n"
+ " lda $31,5b-4b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+ "=&r"(tmp3), "=&r"(tmp4)
+ : "r"(va), "r"(*reg_addr), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ return;
+
+ case 0x26: /* sts */
+ fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
+ /* FALLTHRU */
+
+ case 0x2c: /* stl */
+ __asm__ __volatile__(
+ "1: ldq_u %2,3(%5)\n"
+ "2: ldq_u %1,0(%5)\n"
+ " inslh %6,%5,%4\n"
+ " insll %6,%5,%3\n"
+ " msklh %2,%5,%2\n"
+ " mskll %1,%5,%1\n"
+ " or %2,%4,%2\n"
+ " or %1,%3,%1\n"
+ "3: stq_u %2,3(%5)\n"
+ "4: stq_u %1,0(%5)\n"
+ "5:\n"
+ ".section __ex_table,\"a\"\n"
+ " .long 1b - .\n"
+ " lda %2,5b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %1,5b-2b(%0)\n"
+ " .long 3b - .\n"
+ " lda $31,5b-3b(%0)\n"
+ " .long 4b - .\n"
+ " lda $31,5b-4b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+ "=&r"(tmp3), "=&r"(tmp4)
+ : "r"(va), "r"(*reg_addr), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ return;
+
+ case 0x27: /* stt */
+ fake_reg = alpha_read_fp_reg(reg);
+ /* FALLTHRU */
+
+ case 0x2d: /* stq */
+ __asm__ __volatile__(
+ "1: ldq_u %2,7(%5)\n"
+ "2: ldq_u %1,0(%5)\n"
+ " insqh %6,%5,%4\n"
+ " insql %6,%5,%3\n"
+ " mskqh %2,%5,%2\n"
+ " mskql %1,%5,%1\n"
+ " or %2,%4,%2\n"
+ " or %1,%3,%1\n"
+ "3: stq_u %2,7(%5)\n"
+ "4: stq_u %1,0(%5)\n"
+ "5:\n"
+ ".section __ex_table,\"a\"\n\t"
+ " .long 1b - .\n"
+ " lda %2,5b-1b(%0)\n"
+ " .long 2b - .\n"
+ " lda %1,5b-2b(%0)\n"
+ " .long 3b - .\n"
+ " lda $31,5b-3b(%0)\n"
+ " .long 4b - .\n"
+ " lda $31,5b-4b(%0)\n"
+ ".previous"
+ : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
+ "=&r"(tmp3), "=&r"(tmp4)
+ : "r"(va), "r"(*reg_addr), "0"(0));
+ if (error)
+ goto give_sigsegv;
+ return;
+
+ default:
+ /* What instruction were you trying to use, exactly? */
+ goto give_sigbus;
+ }
+
+ /* Only integer loads should get here; everyone else returns early. */
+ if (reg == 30)
+ wrusp(fake_reg);
+ return;
+
+give_sigsegv:
+ regs->pc -= 4; /* make pc point to faulting insn */
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+
+ /* We need to replicate some of the logic in mm/fault.c,
+ since we don't have access to the fault code in the
+ exception handling return path. */
+ if (!__access_ok((unsigned long)va, 0, USER_DS))
+ info.si_code = SEGV_ACCERR;
+ else {
+ struct mm_struct *mm = current->mm;
+ down_read(&mm->mmap_sem);
+ if (find_vma(mm, (unsigned long)va))
+ info.si_code = SEGV_ACCERR;
+ else
+ info.si_code = SEGV_MAPERR;
+ up_read(&mm->mmap_sem);
+ }
+ info.si_addr = va;
+ send_sig_info(SIGSEGV, &info, current);
+ return;
+
+give_sigbus:
+ regs->pc -= 4;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = va;
+ send_sig_info(SIGBUS, &info, current);
+ return;
+}
+
+void __init
+trap_init(void)
+{
+ /* Tell PAL-code what global pointer we want in the kernel. */
+ register unsigned long gptr __asm__("$29");
+ wrkgp(gptr);
+
+ /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
+ a bug in the handling of the opDEC fault. Fix it up if so. */
+ if (implver() == IMPLVER_EV4)
+ opDEC_check();
+
+ wrent(entArith, 1);
+ wrent(entMM, 2);
+ wrent(entIF, 3);
+ wrent(entUna, 4);
+ wrent(entSys, 5);
+ wrent(entDbg, 6);
+}
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..0922e0785ddb
--- /dev/null
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -0,0 +1,149 @@
+#include <linux/config.h>
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf64-alpha")
+OUTPUT_ARCH(alpha)
+ENTRY(__start)
+PHDRS { kernel PT_LOAD ; }
+jiffies = jiffies_64;
+SECTIONS
+{
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+ . = 0xfffffc0000310000;
+#else
+ . = 0xfffffc0001010000;
+#endif
+
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :kernel
+ _etext = .; /* End of text section */
+
+ . = ALIGN(16);
+ __start___ex_table = .; /* Exception table */
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ RODATA
+
+ /* Will be freed after init */
+ . = ALIGN(8192); /* Init code and data */
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : { *(.init.data) }
+
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+
+ . = ALIGN(8);
+ __initcall_start = .;
+ .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ }
+ __initcall_end = .;
+
+ . = ALIGN(8192);
+ __initramfs_start = .;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+
+ . = ALIGN(8);
+ .con_initcall.init : {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
+
+ . = ALIGN(8);
+ SECURITY_INIT
+
+ . = ALIGN(64);
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
+
+ . = ALIGN(2*8192);
+ __init_end = .;
+ /* Freed after init ends here */
+
+ /* Note 2 page alignment above. */
+ .data.init_thread : { *(.data.init_thread) }
+
+ . = ALIGN(8192);
+ .data.page_aligned : { *(.data.page_aligned) }
+
+ . = ALIGN(64);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ _data = .;
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ .got : { *(.got) }
+ .sdata : { *(.sdata) }
+
+ _edata = .; /* End of data section */
+
+ __bss_start = .;
+ .sbss : { *(.sbss) *(.scommon) }
+ .bss : { *(.bss) *(COMMON) }
+ __bss_stop = .;
+
+ _end = .;
+
+ /* Sections to be discarded */
+ /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
+
+ .mdebug 0 : { *(.mdebug) }
+ .note 0 : { *(.note) }
+ .comment 0 : { *(.comment) }
+
+ /* Stabs debugging sections */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+}
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
new file mode 100644
index 000000000000..21cf624d7329
--- /dev/null
+++ b/arch/alpha/lib/Makefile
@@ -0,0 +1,58 @@
+#
+# Makefile for alpha-specific library files..
+#
+
+EXTRA_AFLAGS := $(CFLAGS)
+EXTRA_CFLAGS := -Werror
+
+# Many of these routines have implementations tuned for ev6.
+# Choose them iff we're targeting ev6 specifically.
+ev6-$(CONFIG_ALPHA_EV6) := ev6-
+
+# Several make use of the cttz instruction introduced in ev67.
+ev67-$(CONFIG_ALPHA_EV67) := ev67-
+
+lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
+ udelay.o \
+ $(ev6-y)memset.o \
+ $(ev6-y)memcpy.o \
+ memmove.o \
+ checksum.o \
+ csum_partial_copy.o \
+ $(ev67-y)strlen.o \
+ $(ev67-y)strcat.o \
+ strcpy.o \
+ $(ev67-y)strncat.o \
+ strncpy.o \
+ $(ev6-y)stxcpy.o \
+ $(ev6-y)stxncpy.o \
+ $(ev67-y)strchr.o \
+ $(ev67-y)strrchr.o \
+ $(ev6-y)memchr.o \
+ $(ev6-y)copy_user.o \
+ $(ev6-y)clear_user.o \
+ $(ev6-y)strncpy_from_user.o \
+ $(ev67-y)strlen_user.o \
+ $(ev6-y)csum_ipv6_magic.o \
+ $(ev6-y)clear_page.o \
+ $(ev6-y)copy_page.o \
+ strcasecmp.o \
+ fpreg.o \
+ callback_srm.o srm_puts.o srm_printk.o
+
+lib-$(CONFIG_SMP) += dec_and_lock.o
+
+# The division routines are built from single source, with different defines.
+AFLAGS___divqu.o = -DDIV
+AFLAGS___remqu.o = -DREM
+AFLAGS___divlu.o = -DDIV -DINTSIZE
+AFLAGS___remlu.o = -DREM -DINTSIZE
+
+$(obj)/__divqu.o: $(obj)/$(ev6-y)divide.S
+ $(cmd_as_o_S)
+$(obj)/__remqu.o: $(obj)/$(ev6-y)divide.S
+ $(cmd_as_o_S)
+$(obj)/__divlu.o: $(obj)/$(ev6-y)divide.S
+ $(cmd_as_o_S)
+$(obj)/__remlu.o: $(obj)/$(ev6-y)divide.S
+ $(cmd_as_o_S)
diff --git a/arch/alpha/lib/callback_srm.S b/arch/alpha/lib/callback_srm.S
new file mode 100644
index 000000000000..0528acd0d9ad
--- /dev/null
+++ b/arch/alpha/lib/callback_srm.S
@@ -0,0 +1,104 @@
+/*
+ * arch/alpha/lib/callback_srm.S
+ */
+
+#include <linux/config.h>
+#include <asm/console.h>
+
+.text
+#define HWRPB_CRB_OFFSET 0xc0
+
+#if defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC)
+.align 4
+srm_dispatch:
+#if defined(CONFIG_ALPHA_GENERIC)
+ ldl $4,alpha_using_srm
+ beq $4,nosrm
+#endif
+ ldq $0,hwrpb # gp is set up by CALLBACK macro.
+ ldl $25,0($25) # Pick up the wrapper data.
+ mov $20,$21 # Shift arguments right.
+ mov $19,$20
+ ldq $1,HWRPB_CRB_OFFSET($0)
+ mov $18,$19
+ mov $17,$18
+ mov $16,$17
+ addq $0,$1,$2 # CRB address
+ ldq $27,0($2) # DISPATCH procedure descriptor (VMS call std)
+ extwl $25,0,$16 # SRM callback function code
+ ldq $3,8($27) # call address
+ extwl $25,2,$25 # argument information (VMS calling std)
+ jmp ($3) # Return directly to caller of wrapper.
+
+.align 4
+.globl srm_fixup
+.ent srm_fixup
+srm_fixup:
+ ldgp $29,0($27)
+#if defined(CONFIG_ALPHA_GENERIC)
+ ldl $4,alpha_using_srm
+ beq $4,nosrm
+#endif
+ ldq $0,hwrpb
+ ldq $1,HWRPB_CRB_OFFSET($0)
+ addq $0,$1,$2 # CRB address
+ ldq $27,16($2) # VA of FIXUP procedure descriptor
+ ldq $3,8($27) # call address
+ lda $25,2($31) # two integer arguments
+ jmp ($3) # Return directly to caller of srm_fixup.
+.end srm_fixup
+
+#if defined(CONFIG_ALPHA_GENERIC)
+.align 3
+nosrm:
+ lda $0,-1($31)
+ ret
+#endif
+
+#define CALLBACK(NAME, CODE, ARG_CNT) \
+.align 4; .globl callback_##NAME; .ent callback_##NAME; callback_##NAME##: \
+ldgp $29,0($27); br $25,srm_dispatch; .word CODE, ARG_CNT; .end callback_##NAME
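+
+# A sketch of how the wrapper data flows: CALLBACK(getenv, CCB_GET_ENV, 4)
+# emits "br $25,srm_dispatch" followed by ".word CCB_GET_ENV, 4", so $25
+# points at that word pair.  srm_dispatch loads it with "ldl $25,0($25)"
+# and unpacks it with extwl: the low halfword becomes the SRM function
+# code in $16, the high halfword the VMS-style argument information in
+# $25.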
+
+#else /* defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) */
+
+#define CALLBACK(NAME, CODE, ARG_CNT) \
+.align 3; .globl callback_##NAME; .ent callback_##NAME; callback_##NAME##: \
+lda $0,-1($31); ret; .end callback_##NAME
+
+.align 3
+.globl srm_fixup
+.ent srm_fixup
+srm_fixup:
+ lda $0,-1($31)
+ ret
+.end srm_fixup
+#endif /* defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) */
+
+CALLBACK(puts, CCB_PUTS, 4)
+CALLBACK(open, CCB_OPEN, 3)
+CALLBACK(close, CCB_CLOSE, 2)
+CALLBACK(read, CCB_READ, 5)
+CALLBACK(open_console, CCB_OPEN_CONSOLE, 1)
+CALLBACK(close_console, CCB_CLOSE_CONSOLE, 1)
+CALLBACK(getenv, CCB_GET_ENV, 4)
+CALLBACK(setenv, CCB_SET_ENV, 4)
+CALLBACK(getc, CCB_GETC, 2)
+CALLBACK(reset_term, CCB_RESET_TERM, 2)
+CALLBACK(term_int, CCB_SET_TERM_INT, 3)
+CALLBACK(term_ctl, CCB_SET_TERM_CTL, 3)
+CALLBACK(process_keycode, CCB_PROCESS_KEYCODE, 3)
+CALLBACK(ioctl, CCB_IOCTL, 6)
+CALLBACK(write, CCB_WRITE, 5)
+CALLBACK(reset_env, CCB_RESET_ENV, 4)
+CALLBACK(save_env, CCB_SAVE_ENV, 1)
+CALLBACK(pswitch, CCB_PSWITCH, 3)
+CALLBACK(bios_emul, CCB_BIOS_EMUL, 5)
+
+.data
+__alpha_using_srm: # For use by bootpheader
+ .long 7 # value is not 1 for link debugging
+ .weak alpha_using_srm; alpha_using_srm = __alpha_using_srm
+__callback_init_done: # For use by bootpheader
+ .long 7 # value is not 1 for link debugging
+ .weak callback_init_done; callback_init_done = __callback_init_done
+
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
new file mode 100644
index 000000000000..89044e6385fe
--- /dev/null
+++ b/arch/alpha/lib/checksum.c
@@ -0,0 +1,186 @@
+/*
+ * arch/alpha/lib/checksum.c
+ *
+ * This file contains network checksum routines that are better done
+ * in an architecture-specific manner due to speed..
+ * Comments in other versions indicate that the algorithms are from RFC1071
+ *
+ * accelerated versions (and 21264 assembly versions) contributed by
+ * Rick Gorton <rick.gorton@alpha-processor.com>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/byteorder.h>
+
+static inline unsigned short from64to16(unsigned long x)
+{
+ /* Using extract instructions is a bit more efficient
+ than the original shift/bitmask version. */
+
+ union {
+ unsigned long ul;
+ unsigned int ui[2];
+ unsigned short us[4];
+ } in_v, tmp_v, out_v;
+
+ in_v.ul = x;
+ tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1];
+
+	/* Since the bits of tmp_v.us[3] are always going to be zero,
+ we don't have to bother to add that in. */
+ out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1]
+ + (unsigned long) tmp_v.us[2];
+
+ /* Similarly, out_v.us[2] is always zero for the final add. */
+ return out_v.us[0] + out_v.us[1];
+}
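+
+#if 0
+/* An equivalent shift/mask folding (a sketch of the kind of "original
+   version" the comment above alludes to), kept for reference: */
+static inline unsigned short from64to16_ref(unsigned long x)
+{
+	x = (x & 0xffffffff) + (x >> 32);	/* 64 -> 33 bits */
+	x = (x & 0xffff) + (x >> 16);		/* 33 -> 18 bits */
+	x = (x & 0xffff) + (x >> 16);		/* 18 -> 17 bits */
+	x = (x & 0xffff) + (x >> 16);		/* 17 -> 16 bits */
+	return x;
+}
+#endif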
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented.
+ */
+unsigned short int csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ return ~from64to16(saddr + daddr + sum +
+ ((unsigned long) ntohs(len) << 16) +
+ ((unsigned long) proto << 8));
+}
+
+unsigned int csum_tcpudp_nofold(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ unsigned long result;
+
+ result = (saddr + daddr + sum +
+ ((unsigned long) ntohs(len) << 16) +
+ ((unsigned long) proto << 8));
+
+ /* Fold down to 32-bits so we don't lose in the typedef-less
+ network stack. */
+ /* 64 to 33 */
+ result = (result & 0xffffffff) + (result >> 32);
+ /* 33 to 32 */
+ result = (result & 0xffffffff) + (result >> 32);
+ return result;
+}
+
+/*
+ * Do a 64-bit checksum on an arbitrary memory area..
+ *
+ * This isn't a great routine, but it's not _horrible_ either. The
+ * inner loop could be unrolled a bit further, and there are better
+ * ways to do the carry, but this is reasonable.
+ */
+static inline unsigned long do_csum(const unsigned char * buff, int len)
+{
+ int odd, count;
+ unsigned long result = 0;
+
+ if (len <= 0)
+ goto out;
+ odd = 1 & (unsigned long) buff;
+ if (odd) {
+ result = *buff << 8;
+ len--;
+ buff++;
+ }
+ count = len >> 1; /* nr of 16-bit words.. */
+ if (count) {
+ if (2 & (unsigned long) buff) {
+ result += *(unsigned short *) buff;
+ count--;
+ len -= 2;
+ buff += 2;
+ }
+ count >>= 1; /* nr of 32-bit words.. */
+ if (count) {
+ if (4 & (unsigned long) buff) {
+ result += *(unsigned int *) buff;
+ count--;
+ len -= 4;
+ buff += 4;
+ }
+ count >>= 1; /* nr of 64-bit words.. */
+ if (count) {
+ unsigned long carry = 0;
+ do {
+ unsigned long w = *(unsigned long *) buff;
+ count--;
+ buff += 8;
+ result += carry;
+ result += w;
+ carry = (w > result);
+ } while (count);
+ result += carry;
+ result = (result & 0xffffffff) + (result >> 32);
+ }
+ if (len & 4) {
+ result += *(unsigned int *) buff;
+ buff += 4;
+ }
+ }
+ if (len & 2) {
+ result += *(unsigned short *) buff;
+ buff += 2;
+ }
+ }
+ if (len & 1)
+ result += *buff;
+ result = from64to16(result);
+ if (odd)
+ result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+ return result;
+}
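+
+/*
+ * The inner-loop carry handling above relies on unsigned wrap-around:
+ * after "result += w", result is smaller than w exactly when the
+ * 64-bit add wrapped, so "carry = (w > result)" recovers the lost
+ * 2**64 bit.  For example, result = 0xffffffffffffffff plus w = 2
+ * wraps to 1; since 2 > 1, carry becomes 1 and is added back in on
+ * the next iteration (or after the loop).
+ */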
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+{
+ return ~do_csum(iph,ihl*4);
+}
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+{
+ unsigned long result = do_csum(buff, len);
+
+ /* add in old sum, and carry.. */
+ result += sum;
+ /* 32+c bits -> 32 bits */
+ result = (result & 0xffffffff) + (result >> 32);
+ return result;
+}
+
+EXPORT_SYMBOL(csum_partial);
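+
+/*
+ * Typical use (a sketch; names illustrative): checksum a packet in
+ * pieces and fold the result into the pseudo-header sum.  Chaining
+ * works because the 32-bit return value is suitable for feeding back
+ * in as "sum":
+ *
+ *	sum = csum_partial(udp_header, 8, 0);
+ *	sum = csum_partial(payload, payload_len, sum);
+ *	check = csum_tcpudp_magic(saddr, daddr, 8 + payload_len,
+ *				  IPPROTO_UDP, sum);
+ */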
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+ return ~from64to16(do_csum(buff,len));
+}
diff --git a/arch/alpha/lib/clear_page.S b/arch/alpha/lib/clear_page.S
new file mode 100644
index 000000000000..a221ae266e29
--- /dev/null
+++ b/arch/alpha/lib/clear_page.S
@@ -0,0 +1,39 @@
+/*
+ * arch/alpha/lib/clear_page.S
+ *
+ * Zero an entire page.
+ */
+
+ .text
+ .align 4
+ .global clear_page
+ .ent clear_page
+clear_page:
+ .prologue 0
+
+ lda $0,128
+ nop
+ unop
+ nop
+
+1: stq $31,0($16)
+ stq $31,8($16)
+ stq $31,16($16)
+ stq $31,24($16)
+
+ stq $31,32($16)
+ stq $31,40($16)
+ stq $31,48($16)
+ subq $0,1,$0
+
+ stq $31,56($16)
+ addq $16,64,$16
+ unop
+ bne $0,1b
+
+ ret
+ nop
+ unop
+ nop
+
+ .end clear_page
diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S
new file mode 100644
index 000000000000..8860316c1957
--- /dev/null
+++ b/arch/alpha/lib/clear_user.S
@@ -0,0 +1,113 @@
+/*
+ * arch/alpha/lib/clear_user.S
+ * Contributed by Richard Henderson <rth@tamu.edu>
+ *
+ * Zero user space, handling exceptions as we go.
+ *
+ * We have to make sure that $0 is always up-to-date and contains the
+ * right "bytes left to zero" value (and that it is updated only _after_
+ * a successful copy). There is also some rather minor exception setup
+ * stuff.
+ *
+ * NOTE! This is not directly C-callable, because the calling semantics
+ * are different:
+ *
+ * Inputs:
+ * length in $0
+ * destination address in $6
+ * exception pointer in $7
+ * return address in $28 (exceptions expect it there)
+ *
+ * Outputs:
+ * bytes left to copy in $0
+ *
+ * Clobbers:
+ * $1,$2,$3,$4,$5,$6
+ */
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EX(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exception-99b($31); \
+ .previous
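+
+/* Each EX() use emits one exception-table entry: the first word
+   (".long 99b - .") is the faulting instruction's address relative to
+   the entry itself, and the second is a fixup descriptor laid out like
+   an lda instruction -- its 16-bit displacement is the distance from
+   the faulting insn to the continuation label ($exception here), and
+   its register fields name where to put an error value ($31 meaning
+   "nowhere").  (Descriptive note; see the alpha exception-table
+   handling for the exact decoding.) */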
+
+ .set noat
+ .set noreorder
+ .align 4
+
+ .globl __do_clear_user
+ .ent __do_clear_user
+ .frame $30, 0, $28
+ .prologue 0
+
+$loop:
+ and $1, 3, $4 # e0 :
+ beq $4, 1f # .. e1 :
+
+0: EX( stq_u $31, 0($6) ) # e0 : zero one word
+ subq $0, 8, $0 # .. e1 :
+ subq $4, 1, $4 # e0 :
+ addq $6, 8, $6 # .. e1 :
+ bne $4, 0b # e1 :
+ unop # :
+
+1: bic $1, 3, $1 # e0 :
+ beq $1, $tail # .. e1 :
+
+2: EX( stq_u $31, 0($6) ) # e0 : zero four words
+ subq $0, 8, $0 # .. e1 :
+ EX( stq_u $31, 8($6) ) # e0 :
+ subq $0, 8, $0 # .. e1 :
+ EX( stq_u $31, 16($6) ) # e0 :
+ subq $0, 8, $0 # .. e1 :
+ EX( stq_u $31, 24($6) ) # e0 :
+ subq $0, 8, $0 # .. e1 :
+ subq $1, 4, $1 # e0 :
+ addq $6, 32, $6 # .. e1 :
+ bne $1, 2b # e1 :
+
+$tail:
+ bne $2, 1f # e1 : is there a tail to do?
+ ret $31, ($28), 1 # .. e1 :
+
+1: EX( ldq_u $5, 0($6) ) # e0 :
+ clr $0 # .. e1 :
+ nop # e1 :
+ mskqh $5, $0, $5 # e0 :
+ EX( stq_u $5, 0($6) ) # e0 :
+ ret $31, ($28), 1 # .. e1 :
+
+__do_clear_user:
+ and $6, 7, $4 # e0 : find dest misalignment
+ beq $0, $zerolength # .. e1 :
+ addq $0, $4, $1 # e0 : bias counter
+ and $1, 7, $2 # e1 : number of bytes in tail
+ srl $1, 3, $1 # e0 :
+ beq $4, $loop # .. e1 :
+
+ EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in
+ beq $1, $oneword # .. e1 : sub-word store?
+
+ mskql $5, $6, $5 # e0 : take care of misaligned head
+ addq $6, 8, $6 # .. e1 :
+ EX( stq_u $5, -8($6) ) # e0 :
+ addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment
+ subq $1, 1, $1 # e0 :
+ subq $0, 8, $0 # .. e1 :
+ br $loop # e1 :
+ unop # :
+
+$oneword:
+ mskql $5, $6, $4 # e0 :
+ mskqh $5, $2, $5 # e0 :
+ or $5, $4, $5 # e1 :
+ EX( stq_u $5, 0($6) ) # e0 :
+ clr $0 # .. e1 :
+
+$zerolength:
+$exception:
+ ret $31, ($28), 1 # .. e1 :
+
+ .end __do_clear_user
diff --git a/arch/alpha/lib/copy_page.S b/arch/alpha/lib/copy_page.S
new file mode 100644
index 000000000000..9f3b97459cc6
--- /dev/null
+++ b/arch/alpha/lib/copy_page.S
@@ -0,0 +1,49 @@
+/*
+ * arch/alpha/lib/copy_page.S
+ *
+ * Copy an entire page.
+ */
+
+ .text
+ .align 4
+ .global copy_page
+ .ent copy_page
+copy_page:
+ .prologue 0
+
+ lda $18,128
+ nop
+ unop
+ nop
+
+1: ldq $0,0($17)
+ ldq $1,8($17)
+ ldq $2,16($17)
+ ldq $3,24($17)
+
+ ldq $4,32($17)
+ ldq $5,40($17)
+ ldq $6,48($17)
+ ldq $7,56($17)
+
+ stq $0,0($16)
+ subq $18,1,$18
+ stq $1,8($16)
+ addq $17,64,$17
+
+ stq $2,16($16)
+ stq $3,24($16)
+ stq $4,32($16)
+ stq $5,40($16)
+
+ stq $6,48($16)
+ stq $7,56($16)
+ addq $16,64,$16
+ bne $18, 1b
+
+ ret
+ nop
+ unop
+ nop
+
+ .end copy_page
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
new file mode 100644
index 000000000000..6f3fab9eb434
--- /dev/null
+++ b/arch/alpha/lib/copy_user.S
@@ -0,0 +1,145 @@
+/*
+ * arch/alpha/lib/copy_user.S
+ *
+ * Copy to/from user space, handling exceptions as we go.. This
+ * isn't exactly pretty.
+ *
+ * This is essentially the same as "memcpy()", but with a few twists.
+ * Notably, we have to make sure that $0 is always up-to-date and
+ * contains the right "bytes left to copy" value (and that it is updated
+ * only _after_ a successful copy). There is also some rather minor
+ * exception setup stuff..
+ *
+ * NOTE! This is not directly C-callable, because the calling semantics are
+ * different:
+ *
+ * Inputs:
+ * length in $0
+ * destination address in $6
+ * source address in $7
+ * return address in $28
+ *
+ * Outputs:
+ * bytes left to copy in $0
+ *
+ * Clobbers:
+ * $1,$2,$3,$4,$5,$6,$7
+ */
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EXI(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exitin-99b($31); \
+ .previous
+
+#define EXO(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exitout-99b($31); \
+ .previous
+
+ .set noat
+ .align 4
+ .globl __copy_user
+ .ent __copy_user
+__copy_user:
+ .prologue 0
+ and $6,7,$3
+ beq $0,$35
+ beq $3,$36
+ subq $3,8,$3
+ .align 4
+$37:
+ EXI( ldq_u $1,0($7) )
+ EXO( ldq_u $2,0($6) )
+ extbl $1,$7,$1
+ mskbl $2,$6,$2
+ insbl $1,$6,$1
+ addq $3,1,$3
+ bis $1,$2,$1
+ EXO( stq_u $1,0($6) )
+ subq $0,1,$0
+ addq $6,1,$6
+ addq $7,1,$7
+ beq $0,$41
+ bne $3,$37
+$36:
+ and $7,7,$1
+ bic $0,7,$4
+ beq $1,$43
+ beq $4,$48
+ EXI( ldq_u $3,0($7) )
+ .align 4
+$50:
+ EXI( ldq_u $2,8($7) )
+ subq $4,8,$4
+ extql $3,$7,$3
+ extqh $2,$7,$1
+ bis $3,$1,$1
+ EXO( stq $1,0($6) )
+ addq $7,8,$7
+ subq $0,8,$0
+ addq $6,8,$6
+ bis $2,$2,$3
+ bne $4,$50
+$48:
+ beq $0,$41
+ .align 4
+$57:
+ EXI( ldq_u $1,0($7) )
+ EXO( ldq_u $2,0($6) )
+ extbl $1,$7,$1
+ mskbl $2,$6,$2
+ insbl $1,$6,$1
+ bis $1,$2,$1
+ EXO( stq_u $1,0($6) )
+ subq $0,1,$0
+ addq $6,1,$6
+ addq $7,1,$7
+ bne $0,$57
+ br $31,$41
+ .align 4
+$43:
+ beq $4,$65
+ .align 4
+$66:
+ EXI( ldq $1,0($7) )
+ subq $4,8,$4
+ EXO( stq $1,0($6) )
+ addq $7,8,$7
+ subq $0,8,$0
+ addq $6,8,$6
+ bne $4,$66
+$65:
+ beq $0,$41
+ EXI( ldq $2,0($7) )
+ EXO( ldq $1,0($6) )
+ mskql $2,$0,$2
+ mskqh $1,$0,$1
+ bis $2,$1,$2
+ EXO( stq $2,0($6) )
+ bis $31,$31,$0
+$41:
+$35:
+$exitout:
+ ret $31,($28),1
+
+$exitin:
+ /* A stupid byte-by-byte zeroing of the rest of the output
+ buffer. This cures security holes by never leaving
+ random kernel data around to be copied elsewhere. */
+
+ mov $0,$1
+$101:
+ EXO ( ldq_u $2,0($6) )
+ subq $1,1,$1
+ mskbl $2,$6,$2
+ EXO ( stq_u $2,0($6) )
+ addq $6,1,$6
+ bgt $1,$101
+ ret $31,($28),1
+
+ .end __copy_user
diff --git a/arch/alpha/lib/csum_ipv6_magic.S b/arch/alpha/lib/csum_ipv6_magic.S
new file mode 100644
index 000000000000..e09748dbf2ed
--- /dev/null
+++ b/arch/alpha/lib/csum_ipv6_magic.S
@@ -0,0 +1,92 @@
+/*
+ * arch/alpha/lib/csum_ipv6_magic.S
+ * Contributed by Richard Henderson <rth@tamu.edu>
+ *
+ * unsigned short csum_ipv6_magic(struct in6_addr *saddr,
+ * struct in6_addr *daddr,
+ * __u32 len,
+ * unsigned short proto,
+ * unsigned int csum);
+ */
+
+ .globl csum_ipv6_magic
+ .align 4
+ .ent csum_ipv6_magic
+ .frame $30,0,$26,0
+csum_ipv6_magic:
+ .prologue 0
+
+ ldq $0,0($16) # e0 : load src & dst addr words
+ zapnot $20,15,$20 # .. e1 : zero extend incoming csum
+ extqh $18,1,$4 # e0 : byte swap len & proto while we wait
+ ldq $1,8($16) # .. e1 :
+
+ extbl $18,1,$5 # e0 :
+ ldq $2,0($17) # .. e1 :
+ extbl $18,2,$6 # e0 :
+ ldq $3,8($17) # .. e1 :
+
+ extbl $18,3,$18 # e0 :
+ sra $4,32,$4 # e0 :
+ sll $5,16,$5 # e0 :
+ addq $20,$0,$20 # .. e1 : begin summing the words
+
+ sll $6,8,$6 # e0 :
+ cmpult $20,$0,$0 # .. e1 :
+ extwh $19,7,$7 # e0 :
+ or $4,$18,$18 # .. e1 :
+
+ extbl $19,1,$19 # e0 :
+ or $5,$6,$5 # .. e1 :
+ or $18,$5,$18 # e0 : len complete
+ or $19,$7,$19 # .. e1 :
+
+ sll $19,48,$19 # e0 :
+ addq $20,$1,$20 # .. e1 :
+ sra $19,32,$19 # e0 : proto complete
+ cmpult $20,$1,$1 # .. e1 :
+
+ nop # e0 :
+ addq $20,$2,$20 # .. e1 :
+ cmpult $20,$2,$2 # e0 :
+ addq $20,$3,$20 # .. e1 :
+
+ cmpult $20,$3,$3 # e0 :
+ addq $20,$18,$20 # .. e1 :
+ cmpult $20,$18,$18 # e0 :
+ addq $20,$19,$20 # .. e1 :
+
+ cmpult $20,$19,$19 # e0 :
+ addq $0,$1,$0 # .. e1 : merge the carries back into the csum
+ addq $2,$3,$2 # e0 :
+ addq $18,$19,$18 # .. e1 :
+
+ addq $0,$2,$0 # e0 :
+ addq $20,$18,$20 # .. e1 :
+ addq $0,$20,$0 # e0 :
+ unop # :
+
+ extwl $0,2,$2 # e0 : begin folding the 64-bit value
+ zapnot $0,3,$3 # .. e1 :
+ extwl $0,4,$1 # e0 :
+ addq $2,$3,$3 # .. e1 :
+
+ extwl $0,6,$0 # e0 :
+ addq $3,$1,$3 # .. e1 :
+ addq $0,$3,$0 # e0 :
+ unop # :
+
+ extwl $0,2,$1 # e0 : fold 18-bit value
+ zapnot $0,3,$0 # .. e1 :
+ addq $0,$1,$0 # e0 :
+ unop # :
+
+ extwl $0,2,$1 # e0 : fold 17-bit value
+ zapnot $0,3,$0 # .. e1 :
+ addq $0,$1,$0 # e0 :
+ not $0,$0 # e1 : and complement.
+
+ zapnot $0,3,$0 # e0 :
+ ret # .. e1 :
+
+ .end csum_ipv6_magic
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
new file mode 100644
index 000000000000..a37948f3037a
--- /dev/null
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -0,0 +1,391 @@
+/*
+ * csum_partial_copy - do IP checksumming and copy
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * accelerated versions (and 21264 assembly versions) contributed by
+ * Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Don't look at this too closely - you'll go mad. The things
+ * we do for performance..
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+
+#define ldq_u(x,y) \
+__asm__ __volatile__("ldq_u %0,%1":"=r" (x):"m" (*(const unsigned long *)(y)))
+
+#define stq_u(x,y) \
+__asm__ __volatile__("stq_u %1,%0":"=m" (*(unsigned long *)(y)):"r" (x))
+
+#define extql(x,y,z) \
+__asm__ __volatile__("extql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
+
+#define extqh(x,y,z) \
+__asm__ __volatile__("extqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
+
+#define mskql(x,y,z) \
+__asm__ __volatile__("mskql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
+
+#define mskqh(x,y,z) \
+__asm__ __volatile__("mskqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
+
+#define insql(x,y,z) \
+__asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
+
+#define insqh(x,y,z) \
+__asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
+
+
+#define __get_user_u(x,ptr) \
+({ \
+ long __guu_err; \
+ __asm__ __volatile__( \
+ "1: ldq_u %0,%2\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .long 1b - .\n" \
+ " lda %0,2b-1b(%1)\n" \
+ ".previous" \
+ : "=r"(x), "=r"(__guu_err) \
+ : "m"(__m(ptr)), "1"(0)); \
+ __guu_err; \
+})
+
+#define __put_user_u(x,ptr) \
+({ \
+ long __puu_err; \
+ __asm__ __volatile__( \
+ "1: stq_u %2,%1\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n" \
+	"	.long 1b - .\n"			\
+ " lda $31,2b-1b(%0)\n" \
+ ".previous" \
+ : "=r"(__puu_err) \
+	: "m"(__m(ptr)), "rJ"(x), "0"(0));	\
+ __puu_err; \
+})
+
+
+static inline unsigned short from64to16(unsigned long x)
+{
+ /* Using extract instructions is a bit more efficient
+ than the original shift/bitmask version. */
+
+ union {
+ unsigned long ul;
+ unsigned int ui[2];
+ unsigned short us[4];
+ } in_v, tmp_v, out_v;
+
+ in_v.ul = x;
+ tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1];
+
+ /* Since the bits of tmp_v.us[3] are going to always be zero,
+ we don't have to bother to add that in. */
+ out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1]
+ + (unsigned long) tmp_v.us[2];
+
+ /* Similarly, out_v.us[2] is always zero for the final add. */
+ return out_v.us[0] + out_v.us[1];
+}
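+
+/*
+ * For reference, the "original shift/bitmask version" mentioned above
+ * looks roughly like this (an illustrative sketch):
+ *
+ *	x = (x & 0xffffffff) + (x >> 32);	at most 33 bits
+ *	x = (x & 0xffff) + (x >> 16);		at most 17 bits + carry
+ *	x = (x & 0xffff) + (x >> 16);
+ *	x = (x & 0xffff) + (x >> 16);		fold in the final carry
+ *	return x;
+ */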
+
+
+
+/*
+ * Ok. This isn't fun, but this is the EASY case.
+ */
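+/* Note: the callers below pass len-8, so the "len >= 0" loop condition
+   means at least one whole quadword remains, and the "len += 8" after
+   the loop recovers the 0..7 trailing bytes for the masked tail.  */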
+static inline unsigned long
+csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
+ long len, unsigned long checksum,
+ int *errp)
+{
+ unsigned long carry = 0;
+ int err = 0;
+
+ while (len >= 0) {
+ unsigned long word;
+ err |= __get_user(word, src);
+ checksum += carry;
+ src++;
+ checksum += word;
+ len -= 8;
+ carry = checksum < word;
+ *dst = word;
+ dst++;
+ }
+ len += 8;
+ checksum += carry;
+ if (len) {
+ unsigned long word, tmp;
+ err |= __get_user(word, src);
+ tmp = *dst;
+ mskql(word, len, word);
+ checksum += word;
+ mskqh(tmp, len, tmp);
+ carry = checksum < word;
+ *dst = word | tmp;
+ checksum += carry;
+ }
+ if (err) *errp = err;
+ return checksum;
+}
+
+/*
+ * This is even less fun, but this is still reasonably
+ * easy.
+ */
+static inline unsigned long
+csum_partial_cfu_dest_aligned(const unsigned long __user *src,
+ unsigned long *dst,
+ unsigned long soff,
+ long len, unsigned long checksum,
+ int *errp)
+{
+ unsigned long first;
+ unsigned long word, carry;
+ unsigned long lastsrc = 7+len+(unsigned long)src;
+ int err = 0;
+
+ err |= __get_user_u(first,src);
+ carry = 0;
+ while (len >= 0) {
+ unsigned long second;
+
+ err |= __get_user_u(second, src+1);
+ extql(first, soff, word);
+ len -= 8;
+ src++;
+ extqh(second, soff, first);
+ checksum += carry;
+ word |= first;
+ first = second;
+ checksum += word;
+ *dst = word;
+ dst++;
+ carry = checksum < word;
+ }
+ len += 8;
+ checksum += carry;
+ if (len) {
+ unsigned long tmp;
+ unsigned long second;
+ err |= __get_user_u(second, lastsrc);
+ tmp = *dst;
+ extql(first, soff, word);
+ extqh(second, soff, first);
+ word |= first;
+ mskql(word, len, word);
+ checksum += word;
+ mskqh(tmp, len, tmp);
+ carry = checksum < word;
+ *dst = word | tmp;
+ checksum += carry;
+ }
+ if (err) *errp = err;
+ return checksum;
+}
+
+/*
+ * This is slightly less fun than the above..
+ */
+static inline unsigned long
+csum_partial_cfu_src_aligned(const unsigned long __user *src,
+ unsigned long *dst,
+ unsigned long doff,
+ long len, unsigned long checksum,
+ unsigned long partial_dest,
+ int *errp)
+{
+ unsigned long carry = 0;
+ unsigned long word;
+ unsigned long second_dest;
+ int err = 0;
+
+ mskql(partial_dest, doff, partial_dest);
+ while (len >= 0) {
+ err |= __get_user(word, src);
+ len -= 8;
+ insql(word, doff, second_dest);
+ checksum += carry;
+ stq_u(partial_dest | second_dest, dst);
+ src++;
+ checksum += word;
+ insqh(word, doff, partial_dest);
+ carry = checksum < word;
+ dst++;
+ }
+ len += 8;
+ if (len) {
+ checksum += carry;
+ err |= __get_user(word, src);
+ mskql(word, len, word);
+ len -= 8;
+ checksum += word;
+ insql(word, doff, second_dest);
+ len += doff;
+ carry = checksum < word;
+ partial_dest |= second_dest;
+ if (len >= 0) {
+ stq_u(partial_dest, dst);
+ if (!len) goto out;
+ dst++;
+ insqh(word, doff, partial_dest);
+ }
+ doff = len;
+ }
+ ldq_u(second_dest, dst);
+ mskqh(second_dest, doff, second_dest);
+ stq_u(partial_dest | second_dest, dst);
+out:
+ checksum += carry;
+ if (err) *errp = err;
+ return checksum;
+}
+
+/*
+ * This is so totally un-fun that it's frightening. Don't
+ * look at this too closely, you'll go blind.
+ */
+static inline unsigned long
+csum_partial_cfu_unaligned(const unsigned long __user * src,
+ unsigned long * dst,
+ unsigned long soff, unsigned long doff,
+ long len, unsigned long checksum,
+ unsigned long partial_dest,
+ int *errp)
+{
+ unsigned long carry = 0;
+ unsigned long first;
+ unsigned long lastsrc;
+ int err = 0;
+
+ err |= __get_user_u(first, src);
+ lastsrc = 7+len+(unsigned long)src;
+ mskql(partial_dest, doff, partial_dest);
+ while (len >= 0) {
+ unsigned long second, word;
+ unsigned long second_dest;
+
+ err |= __get_user_u(second, src+1);
+ extql(first, soff, word);
+ checksum += carry;
+ len -= 8;
+ extqh(second, soff, first);
+ src++;
+ word |= first;
+ first = second;
+ insql(word, doff, second_dest);
+ checksum += word;
+ stq_u(partial_dest | second_dest, dst);
+ carry = checksum < word;
+ insqh(word, doff, partial_dest);
+ dst++;
+ }
+ len += doff;
+ checksum += carry;
+ if (len >= 0) {
+ unsigned long second, word;
+ unsigned long second_dest;
+
+ err |= __get_user_u(second, lastsrc);
+ extql(first, soff, word);
+ extqh(second, soff, first);
+ word |= first;
+ first = second;
+ mskql(word, len-doff, word);
+ checksum += word;
+ insql(word, doff, second_dest);
+ carry = checksum < word;
+ stq_u(partial_dest | second_dest, dst);
+ if (len) {
+ ldq_u(second_dest, dst+1);
+ insqh(word, doff, partial_dest);
+ mskqh(second_dest, len, second_dest);
+ stq_u(partial_dest | second_dest, dst+1);
+ }
+ checksum += carry;
+ } else {
+ unsigned long second, word;
+ unsigned long second_dest;
+
+ err |= __get_user_u(second, lastsrc);
+ extql(first, soff, word);
+ extqh(second, soff, first);
+ word |= first;
+ ldq_u(second_dest, dst);
+ mskql(word, len-doff, word);
+ checksum += word;
+ mskqh(second_dest, len, second_dest);
+ carry = checksum < word;
+ insql(word, doff, word);
+ stq_u(partial_dest | word | second_dest, dst);
+ checksum += carry;
+ }
+ if (err) *errp = err;
+ return checksum;
+}
+
+static unsigned int
+do_csum_partial_copy_from_user(const char __user *src, char *dst, int len,
+ unsigned int sum, int *errp)
+{
+ unsigned long checksum = (unsigned) sum;
+ unsigned long soff = 7 & (unsigned long) src;
+ unsigned long doff = 7 & (unsigned long) dst;
+
+ if (len) {
+ if (!doff) {
+ if (!soff)
+ checksum = csum_partial_cfu_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ len-8, checksum, errp);
+ else
+ checksum = csum_partial_cfu_dest_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ soff, len-8, checksum, errp);
+ } else {
+ unsigned long partial_dest;
+ ldq_u(partial_dest, dst);
+ if (!soff)
+ checksum = csum_partial_cfu_src_aligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ doff, len-8, checksum,
+ partial_dest, errp);
+ else
+ checksum = csum_partial_cfu_unaligned(
+ (const unsigned long __user *) src,
+ (unsigned long *) dst,
+ soff, doff, len-8, checksum,
+ partial_dest, errp);
+ }
+ checksum = from64to16 (checksum);
+ }
+ return checksum;
+}
+
+unsigned int
+csum_partial_copy_from_user(const char __user *src, char *dst, int len,
+ unsigned int sum, int *errp)
+{
+ if (!access_ok(VERIFY_READ, src, len)) {
+ *errp = -EFAULT;
+ memset(dst, 0, len);
+ return sum;
+ }
+
+ return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
+}
+
+unsigned int
+csum_partial_copy_nocheck(const char __user *src, char *dst, int len,
+ unsigned int sum)
+{
+ return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+}
diff --git a/arch/alpha/lib/dbg_current.S b/arch/alpha/lib/dbg_current.S
new file mode 100644
index 000000000000..e6d071015f9b
--- /dev/null
+++ b/arch/alpha/lib/dbg_current.S
@@ -0,0 +1,29 @@
+/*
+ * arch/alpha/lib/dbg_current.S
+ * Contributed by Richard Henderson (rth@cygnus.com)
+ *
+ * Trap if we find current not correct.
+ */
+
+#include <asm/pal.h>
+
+ .text
+ .set noat
+
+ .globl _mcount
+ .ent _mcount
+_mcount:
+ .frame $30, 0, $28, 0
+ .prologue 0
+
+ lda $0, -0x4000($30)
+ cmpult $8, $30, $1
+ cmpule $0, $8, $2
+ and $1, $2, $3
+ bne $3, 1f
+
+ call_pal PAL_bugchk
+
+1: ret $31, ($28), 1
+
+ .end _mcount
diff --git a/arch/alpha/lib/dbg_stackcheck.S b/arch/alpha/lib/dbg_stackcheck.S
new file mode 100644
index 000000000000..cc5ce3a5fcad
--- /dev/null
+++ b/arch/alpha/lib/dbg_stackcheck.S
@@ -0,0 +1,27 @@
+/*
+ * arch/alpha/lib/dbg_stackcheck.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Verify that we have not overflowed the stack. Oops if we have.
+ */
+
+#include <asm/asm_offsets.h>
+
+ .text
+ .set noat
+
+ .align 3
+ .globl _mcount
+ .ent _mcount
+_mcount:
+ .frame $30, 0, $28, 0
+ .prologue 0
+
+ lda $0, TASK_SIZE($8)
+ cmpult $30, $0, $0
+ bne $0, 1f
+ ret ($28)
+1: stq $31, -8($31) # oops me, damn it.
+ br 1b
+
+ .end _mcount
diff --git a/arch/alpha/lib/dbg_stackkill.S b/arch/alpha/lib/dbg_stackkill.S
new file mode 100644
index 000000000000..e09f2ae1e09e
--- /dev/null
+++ b/arch/alpha/lib/dbg_stackkill.S
@@ -0,0 +1,35 @@
+/*
+ * arch/alpha/lib/dbg_stackkill.S
+ * Contributed by Richard Henderson (rth@cygnus.com)
+ *
+ * Clobber the balance of the kernel stack, hoping to catch
+ * uninitialized local variables in the act.
+ */
+
+#include <asm/asm_offsets.h>
+
+ .text
+ .set noat
+
+ .align 5
+ .globl _mcount
+ .ent _mcount
+_mcount:
+ .frame $30, 0, $28, 0
+ .prologue 0
+
+ ldi $0, 0xdeadbeef
+ lda $2, -STACK_SIZE
+ sll $0, 32, $1
+ and $30, $2, $2
+ or $0, $1, $0
+ lda $2, TASK_SIZE($2)
+ cmpult $2, $30, $1
+ beq $1, 2f
+1: stq $0, 0($2)
+ addq $2, 8, $2
+ cmpult $2, $30, $1
+ bne $1, 1b
+2: ret ($28)
+
+ .end _mcount
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
new file mode 100644
index 000000000000..6ae2500a9d9e
--- /dev/null
+++ b/arch/alpha/lib/dec_and_lock.c
@@ -0,0 +1,42 @@
+/*
+ * arch/alpha/lib/dec_and_lock.c
+ *
+ * ll/sc version of atomic_dec_and_lock()
+ *
+ */
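+
+/*
+ * Fast-path sketch of the ll/sc sequence below (illustrative only;
+ * load_locked/store_conditional are pseudo-functions standing in for
+ * ldl_l/stl_c):
+ *
+ *	for (;;) {
+ *		old = load_locked(&atomic->counter);
+ *		if (old - 1 == 0)
+ *			break;				nothing stored
+ *		if (store_conditional(&atomic->counter, old - 1))
+ *			return 0;			lock never taken
+ *	}
+ *	return atomic_dec_and_lock_1(atomic, lock);	slow path
+ */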
+
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+ asm (".text \n\
+ .global _atomic_dec_and_lock \n\
+ .ent _atomic_dec_and_lock \n\
+ .align 4 \n\
+_atomic_dec_and_lock: \n\
+ .prologue 0 \n\
+1: ldl_l $1, 0($16) \n\
+ subl $1, 1, $1 \n\
+ beq $1, 2f \n\
+ stl_c $1, 0($16) \n\
+ beq $1, 4f \n\
+ mb \n\
+ clr $0 \n\
+ ret \n\
+2: br $29, 3f \n\
+3: ldgp $29, 0($29) \n\
+ br $atomic_dec_and_lock_1..ng \n\
+ .subsection 2 \n\
+4: br 1b \n\
+ .previous \n\
+ .end _atomic_dec_and_lock");
+
+static int __attribute_used__
+atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
+{
+ /* Slow path */
+ spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock(lock);
+ return 0;
+}
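+
+/*
+ * Typical caller pattern (an illustrative sketch; "obj" and "obj_lock"
+ * are made-up names): drop a reference, taking the lock only when this
+ * was the last one.
+ *
+ *	if (atomic_dec_and_lock(&obj->refcnt, &obj_lock)) {
+ *		list_del(&obj->list);		still under obj_lock
+ *		spin_unlock(&obj_lock);
+ *		kfree(obj);
+ *	}
+ */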
diff --git a/arch/alpha/lib/divide.S b/arch/alpha/lib/divide.S
new file mode 100644
index 000000000000..2d1a0484a99e
--- /dev/null
+++ b/arch/alpha/lib/divide.S
@@ -0,0 +1,195 @@
+/*
+ * arch/alpha/lib/divide.S
+ *
+ * (C) 1995 Linus Torvalds
+ *
+ * Alpha division..
+ */
+
+/*
+ * The alpha chip doesn't provide hardware division, so we have to do it
+ * by hand. The compiler expects the functions
+ *
+ * __divqu: 64-bit unsigned long divide
+ * __remqu: 64-bit unsigned long remainder
+ * __divqs/__remqs: signed 64-bit
+ * __divlu/__remlu: unsigned 32-bit
+ * __divls/__remls: signed 32-bit
+ *
+ * These are not normal C functions: instead of the normal
+ * calling sequence, these expect their arguments in registers
+ * $24 and $25, and return the result in $27. Register $28 may
+ * be clobbered (assembly temporary), anything else must be saved.
+ *
+ * In short: painful.
+ *
+ * This is a rather simple bit-at-a-time algorithm: it's very good
+ * at dividing random 64-bit numbers, but the more usual case where
+ * the divisor is small is handled better by the DEC algorithm
+ * using lookup tables. This uses much less memory, though, and is
+ * nicer on the cache.. Besides, I don't know the copyright status
+ * of the DEC code.
+ */
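+
+/*
+ * C sketch of that bit-at-a-time algorithm (illustrative only; the
+ * real code below keeps everything in the fixed registers listed
+ * next):
+ *
+ *	mask = 1; quotient = 0; modulus = dividend;
+ *	while (divisor < modulus) {
+ *		divisor <<= 1;
+ *		mask <<= 1;
+ *	}
+ *	while (mask) {
+ *		if (divisor <= modulus) {
+ *			quotient += mask;
+ *			modulus -= divisor;
+ *		}
+ *		mask >>= 1;
+ *		divisor >>= 1;
+ *	}
+ *
+ * __div* returns quotient, __rem* returns modulus.
+ */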
+
+/*
+ * My temporaries:
+ * $0 - current bit
+ * $1 - shifted divisor
+ * $2 - modulus/quotient
+ *
+ * $23 - return address
+ * $24 - dividend
+ * $25 - divisor
+ *
+ * $27 - quotient/modulus
+ * $28 - compare status
+ */
+
+#define halt .long 0
+
+/*
+ * Select function type and registers
+ */
+#define mask $0
+#define divisor $1
+#define compare $28
+#define tmp1 $3
+#define tmp2 $4
+
+#ifdef DIV
+#define DIV_ONLY(x,y...) x,##y
+#define MOD_ONLY(x,y...)
+#define func(x) __div##x
+#define modulus $2
+#define quotient $27
+#define GETSIGN(x) xor $24,$25,x
+#define STACK 48
+#else
+#define DIV_ONLY(x,y...)
+#define MOD_ONLY(x,y...) x,##y
+#define func(x) __rem##x
+#define modulus $27
+#define quotient $2
+#define GETSIGN(x) bis $24,$24,x
+#define STACK 32
+#endif
+
+/*
+ * For 32-bit operations, we need to extend to 64-bit
+ */
+#ifdef INTSIZE
+#define ufunction func(lu)
+#define sfunction func(l)
+#define LONGIFY(x) zapnot x,15,x
+#define SLONGIFY(x) addl x,0,x
+#else
+#define ufunction func(qu)
+#define sfunction func(q)
+#define LONGIFY(x)
+#define SLONGIFY(x)
+#endif
+
+.set noat
+.align 3
+.globl ufunction
+.ent ufunction
+ufunction:
+ subq $30,STACK,$30
+ .frame $30,STACK,$23
+ .prologue 0
+
+7: stq $1, 0($30)
+ bis $25,$25,divisor
+ stq $2, 8($30)
+ bis $24,$24,modulus
+ stq $0,16($30)
+ bis $31,$31,quotient
+ LONGIFY(divisor)
+ stq tmp1,24($30)
+ LONGIFY(modulus)
+ bis $31,1,mask
+ DIV_ONLY(stq tmp2,32($30))
+ beq divisor, 9f /* div by zero */
+
+#ifdef INTSIZE
+ /*
+ * shift divisor left, using 3-bit shifts for
+ * 32-bit divides as we can't overflow. Three-bit
+ * shifts will result in looping three times less
+ * here, but can result in two loops more later.
+ * Thus using a large shift isn't worth it (and
+ * s8add pairs better than a sll..)
+ */
+1: cmpult divisor,modulus,compare
+ s8addq divisor,$31,divisor
+ s8addq mask,$31,mask
+ bne compare,1b
+#else
+1: cmpult divisor,modulus,compare
+ blt divisor, 2f
+ addq divisor,divisor,divisor
+ addq mask,mask,mask
+ bne compare,1b
+ unop
+#endif
+
+ /* ok, start to go right again.. */
+2: DIV_ONLY(addq quotient,mask,tmp2)
+ srl mask,1,mask
+ cmpule divisor,modulus,compare
+ subq modulus,divisor,tmp1
+ DIV_ONLY(cmovne compare,tmp2,quotient)
+ srl divisor,1,divisor
+ cmovne compare,tmp1,modulus
+ bne mask,2b
+
+9: ldq $1, 0($30)
+ ldq $2, 8($30)
+ ldq $0,16($30)
+ ldq tmp1,24($30)
+ DIV_ONLY(ldq tmp2,32($30))
+ addq $30,STACK,$30
+ ret $31,($23),1
+ .end ufunction
+
+/*
+ * Uhh.. Ugly signed division. I'd rather not have it at all, but
+ * it's needed in some circumstances. There are different ways to
+ * handle this, really. This does:
+ * -a / b = a / -b = -(a / b)
+ * -a % b = -(a % b)
+ * a % -b = a % b
+ * which is probably not the best solution, but at least should
+ * have the property that (x/y)*y + (x%y) = x.
+ */
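+/*
+ * Worked example (illustrative): x = -7, y = 2 gives
+ * x/y = -(7/2) = -3 and x%y = -(7%2) = -1, and indeed
+ * (x/y)*y + (x%y) = (-3)*2 + (-1) = -7 = x.
+ */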
+.align 3
+.globl sfunction
+.ent sfunction
+sfunction:
+ subq $30,STACK,$30
+ .frame $30,STACK,$23
+ .prologue 0
+ bis $24,$25,$28
+ SLONGIFY($28)
+ bge $28,7b
+ stq $24,0($30)
+ subq $31,$24,$28
+ stq $25,8($30)
+ cmovlt $24,$28,$24 /* abs($24) */
+ stq $23,16($30)
+ subq $31,$25,$28
+ stq tmp1,24($30)
+ cmovlt $25,$28,$25 /* abs($25) */
+ unop
+ bsr $23,ufunction
+ ldq $24,0($30)
+ ldq $25,8($30)
+ GETSIGN($28)
+ subq $31,$27,tmp1
+ SLONGIFY($28)
+ ldq $23,16($30)
+ cmovlt $28,tmp1,$27
+ ldq tmp1,24($30)
+ addq $30,STACK,$30
+ ret $31,($23),1
+ .end sfunction
diff --git a/arch/alpha/lib/ev6-clear_page.S b/arch/alpha/lib/ev6-clear_page.S
new file mode 100644
index 000000000000..adf4f7be0e2b
--- /dev/null
+++ b/arch/alpha/lib/ev6-clear_page.S
@@ -0,0 +1,54 @@
+/*
+ * arch/alpha/lib/ev6-clear_page.S
+ *
+ * Zero an entire page.
+ */
+
+ .text
+ .align 4
+ .global clear_page
+ .ent clear_page
+clear_page:
+ .prologue 0
+
+ lda $0,128
+ lda $1,125
+ addq $16,64,$2
+ addq $16,128,$3
+
+ addq $16,192,$17
+ wh64 ($16)
+ wh64 ($2)
+ wh64 ($3)
+
+1: wh64 ($17)
+ stq $31,0($16)
+ subq $0,1,$0
+ subq $1,1,$1
+
+ stq $31,8($16)
+ stq $31,16($16)
+ addq $17,64,$2
+ nop
+
+ stq $31,24($16)
+ stq $31,32($16)
+ cmovgt $1,$2,$17
+ nop
+
+ stq $31,40($16)
+ stq $31,48($16)
+ nop
+ nop
+
+ stq $31,56($16)
+ addq $16,64,$16
+ nop
+ bne $0,1b
+
+ ret
+ nop
+ nop
+ nop
+
+ .end clear_page
diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S
new file mode 100644
index 000000000000..4f42a16b7f53
--- /dev/null
+++ b/arch/alpha/lib/ev6-clear_user.S
@@ -0,0 +1,225 @@
+/*
+ * arch/alpha/lib/ev6-clear_user.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Zero user space, handling exceptions as we go.
+ *
+ * We have to make sure that $0 is always up-to-date and contains the
+ * right "bytes left to zero" value (and that it is updated only _after_
+ * a successful copy). There is also some rather minor exception setup
+ * stuff.
+ *
+ * NOTE! This is not directly C-callable, because the calling semantics
+ * are different:
+ *
+ * Inputs:
+ * length in $0
+ * destination address in $6
+ * exception pointer in $7
+ * return address in $28 (exceptions expect it there)
+ *
+ * Outputs:
+ * bytes left to copy in $0
+ *
+ * Clobbers:
+ * $1,$2,$3,$4,$5,$6
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ * Determining actual stalls (other than slotting) doesn't appear to be easy to do.
+ * From perusing the source code context where this routine is called, it is
+ * a fair assumption that significant fractions of entire pages are zeroed, so
+ * it's going to be worth the effort to hand-unroll a big loop, and use wh64.
+ * ASSUMPTION:
+ * The believed purpose of only updating $0 after a store is that a signal
+ * may come along during the execution of this chunk of code, and we don't
+ * want to leave a hole (and we also want to avoid repeating lots of work)
+ */
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EX(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exception-99b($31); \
+ .previous
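+
+/*
+ * Example expansion (illustrative): EX( stq_u $31, 0($6) ) emits
+ *
+ *	99: stq_u $31,0($6)
+ *	.section __ex_table,"a"
+ *	.long 99b - .
+ *	lda $31, $exception-99b($31)
+ *	.previous
+ *
+ * i.e. an exception-table entry pairing the PC of the faulting insn
+ * with a fixup displacement, so a fault in that store resumes at
+ * $exception below.
+ */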
+
+ .set noat
+ .set noreorder
+ .align 4
+
+ .globl __do_clear_user
+ .ent __do_clear_user
+ .frame $30, 0, $28
+ .prologue 0
+
+ # Pipeline info : Slotting & Comments
+__do_clear_user:
+ and $6, 7, $4 # .. E .. .. : find dest head misalignment
+ beq $0, $zerolength # U .. .. .. : U L U L
+
+ addq $0, $4, $1 # .. .. .. E : bias counter
+ and $1, 7, $2 # .. .. E .. : number of misaligned bytes in tail
+# Note - we never actually use $2, so this is a moot computation
+# and we can rewrite this later...
+ srl $1, 3, $1 # .. E .. .. : number of quadwords to clear
+ beq $4, $headalign # U .. .. .. : U L U L
+
+/*
+ * Head is not aligned. Write (8 - $4) bytes to head of destination
+ * This means $6 is known to be misaligned
+ */
+ EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in
+ beq $1, $onebyte # .. .. U .. : sub-word store?
+ mskql $5, $6, $5 # .. U .. .. : take care of misaligned head
+ addq $6, 8, $6 # E .. .. .. : L U U L
+
+ EX( stq_u $5, -8($6) ) # .. .. .. L :
+ subq $1, 1, $1 # .. .. E .. :
+ addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment
+ subq $0, 8, $0 # E .. .. .. : U L U L
+
+ .align 4
+/*
+ * (The .align directive ought to be a moot point)
+ * values upon initial entry to the loop
+ * $1 is number of quadwords to clear (zero is a valid value)
+ * $2 is number of trailing bytes (0..7) ($2 never used...)
+ * $6 is known to be aligned 0mod8
+ */
+$headalign:
+ subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop
+ and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop
+ subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop)
+ blt $4, $trailquad # U .. .. .. : U L U L
+
+/*
+ * We know that we're going to do at least 16 quads, which means we are
+ * going to be able to use the large block clear loop at least once.
+ * Figure out how many quads we need to clear before we are 0mod64 aligned
+ * so we can use the wh64 instruction.
+ */
+
+ nop # .. .. .. E
+ nop # .. .. E ..
+ nop # .. E .. ..
+ beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64
+
+$alignmod64:
+ EX( stq_u $31, 0($6) ) # .. .. .. L
+ addq $3, 8, $3 # .. .. E ..
+ subq $0, 8, $0 # .. E .. ..
+ nop # E .. .. .. : U L U L
+
+ nop # .. .. .. E
+ subq $1, 1, $1 # .. .. E ..
+ addq $6, 8, $6 # .. E .. ..
+ blt $3, $alignmod64 # U .. .. .. : U L U L
+
+$bigalign:
+/*
+ * $0 is the number of bytes left
+ * $1 is the number of quads left
+ * $6 is aligned 0mod64
+ * we know that we'll be taking a minimum of one trip through the loop.
+ * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
+ * We are _not_ going to update $0 after every single store. That
+ * would be silly, because there will be cross-cluster dependencies
+ * no matter how the code is scheduled. By doing it in slightly
+ * staggered fashion, we can still do this loop in 5 fetches.
+ * The worst case will be doing two extra quads in some future execution,
+ * in the event of an interrupted clear.
+ * Assumes the wh64 needs to be for 2 trips through the loop in the future.
+ * The wh64 is issued for the starting destination address for trip +2
+ * through the loop, and if there are fewer than two trips left, the target
+ * address will be for the current trip.
+ */
+ nop # E :
+ nop # E :
+ nop # E :
+ bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest
+ /* This might actually help for the current trip... */
+
+$do_wh64:
+ wh64 ($3) # .. .. .. L1 : memory subsystem hint
+ subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop?
+ EX( stq_u $31, 0($6) ) # .. L .. ..
+ subq $0, 8, $0 # E .. .. .. : U L U L
+
+ addq $6, 128, $3 # E : Target address of wh64
+ EX( stq_u $31, 8($6) ) # L :
+ EX( stq_u $31, 16($6) ) # L :
+ subq $0, 16, $0 # E : U L L U
+
+ nop # E :
+ EX( stq_u $31, 24($6) ) # L :
+ EX( stq_u $31, 32($6) ) # L :
+ subq $0, 168, $5 # E : U L L U : two trips through the loop left?
+ /* 168 = 192 - 24, since we've already completed some stores */
+
+ subq $0, 16, $0 # E :
+ EX( stq_u $31, 40($6) ) # L :
+ EX( stq_u $31, 48($6) ) # L :
+ cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle
+
+ subq $1, 8, $1 # E :
+ subq $0, 16, $0 # E :
+ EX( stq_u $31, 56($6) ) # L :
+ nop # E : U L U L
+
+ nop # E :
+ subq $0, 8, $0 # E :
+ addq $6, 64, $6 # E :
+ bge $4, $do_wh64 # U : U L U L
+
+$trailquad:
+ # zero to 16 quadwords left to store, plus any trailing bytes
+ # $1 is the number of quadwords left to go.
+ #
+ nop # .. .. .. E
+ nop # .. .. E ..
+ nop # .. E .. ..
+ beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go
+
+$onequad:
+ EX( stq_u $31, 0($6) ) # .. .. .. L
+ subq $1, 1, $1 # .. .. E ..
+ subq $0, 8, $0 # .. E .. ..
+ nop # E .. .. .. : U L U L
+
+ nop # .. .. .. E
+ nop # .. .. E ..
+ addq $6, 8, $6 # .. E .. ..
+ bgt $1, $onequad # U .. .. .. : U L U L
+
+ # We have an unknown number of bytes left to go.
+$trailbytes:
+ nop # .. .. .. E
+ nop # .. .. E ..
+ nop # .. E .. ..
+ beq $0, $zerolength # U .. .. .. : U L U L
+
+ # $0 contains the number of bytes left to copy (0..31)
+ # so we will use $0 as the loop counter
+ # We know for a fact that $0 > 0 due to previous context
+$onebyte:
+ EX( stb $31, 0($6) ) # .. .. .. L
+ subq $0, 1, $0 # .. .. E .. :
+ addq $6, 1, $6 # .. E .. .. :
+ bgt $0, $onebyte # U .. .. .. : U L U L
+
+$zerolength:
+$exception: # Destination for exception recovery(?)
+ nop # .. .. .. E :
+ nop # .. .. E .. :
+ nop # .. E .. .. :
+ ret $31, ($28), 1 # L0 .. .. .. : L U L U
+ .end __do_clear_user
+
diff --git a/arch/alpha/lib/ev6-copy_page.S b/arch/alpha/lib/ev6-copy_page.S
new file mode 100644
index 000000000000..b789db192754
--- /dev/null
+++ b/arch/alpha/lib/ev6-copy_page.S
@@ -0,0 +1,203 @@
+/*
+ * arch/alpha/lib/ev6-copy_page.S
+ *
+ * Copy an entire page.
+ */
+
+/* The following comparison of this routine vs the normal copy_page.S
+ was written by an unnamed ev6 hardware designer and forwarded to me
+ via Steven Hobbs <hobbs@steven.zko.dec.com>.
+
+ First Problem: STQ overflows.
+ -----------------------------
+
+ It would be nice if EV6 handled every resource overflow efficiently,
+ but for some it doesn't. Including store queue overflows. It causes
+ a trap and a restart of the pipe.
+
+ To get around this we sometimes use (to borrow a term from a VSSAD
+ researcher) "aeration". The idea is to slow the rate at which the
+ processor receives valid instructions by inserting nops in the fetch
+ path. In doing so, you can prevent the overflow and actually make
+ the code run faster. You can, of course, take advantage of the fact
+ that the processor can fetch at most 4 aligned instructions per cycle.
+
+ I inserted enough nops to force it to take 10 cycles to fetch the
+ loop code. In theory, EV6 should be able to execute this loop in
+ 9 cycles but I was not able to get it to run that fast -- the initial
+ conditions were such that I could not reach this optimum rate on
+ (chaotic) EV6. I wrote the code such that everything would issue
+ in order.
+
+ Second Problem: Dcache index matches.
+ -------------------------------------
+
+ If you are going to use this routine on random aligned pages, there
+ is a 25% chance that the pages will be at the same dcache indices.
+ This results in many nasty memory traps without care.
+
+ The solution is to schedule the prefetches to avoid the memory
+ conflicts. I schedule the wh64 prefetches farther ahead of the
+ read prefetches to avoid this problem.
+
+ Third Problem: Needs more prefetching.
+ --------------------------------------
+
+ In order to improve the code I added deeper prefetching to take the
+ most advantage of EV6's bandwidth.
+
+ I also prefetched the read stream. Note that adding the read prefetch
+ forced me to add another cycle to the inner-most kernel - up to 11
+ from the original 8 cycles per iteration. We could improve performance
+ further by unrolling the loop and doing multiple prefetches per cycle.
+
+ I think that the code below will be very robust and fast code for the
+ purposes of copying aligned pages. It is slower when both source and
+ destination pages are in the dcache, but it is my guess that this is
+ less important than the dcache miss case. */
+
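+/* An 8 KB page is 128 cache lines of 64 bytes: the main loop below
+   copies 118 of them with prefetching and write-hinting, and the
+   cleanup loop handles the final 10 without hints.  */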
+
+ .text
+ .align 4
+ .global copy_page
+ .ent copy_page
+copy_page:
+ .prologue 0
+
+ /* Prefetch 5 read cachelines; write-hint 10 cache lines. */
+ wh64 ($16)
+ ldl $31,0($17)
+ ldl $31,64($17)
+ lda $1,1*64($16)
+
+ wh64 ($1)
+ ldl $31,128($17)
+ ldl $31,192($17)
+ lda $1,2*64($16)
+
+ wh64 ($1)
+ ldl $31,256($17)
+ lda $18,118
+ lda $1,3*64($16)
+
+ wh64 ($1)
+ nop
+ lda $1,4*64($16)
+ lda $2,5*64($16)
+
+ wh64 ($1)
+ wh64 ($2)
+ lda $1,6*64($16)
+ lda $2,7*64($16)
+
+ wh64 ($1)
+ wh64 ($2)
+ lda $1,8*64($16)
+ lda $2,9*64($16)
+
+ wh64 ($1)
+ wh64 ($2)
+ lda $19,10*64($16)
+ nop
+
+ /* Main prefetching/write-hinting loop. */
+1: ldq $0,0($17)
+ ldq $1,8($17)
+ unop
+ unop
+
+ unop
+ unop
+ ldq $2,16($17)
+ ldq $3,24($17)
+
+ ldq $4,32($17)
+ ldq $5,40($17)
+ unop
+ unop
+
+ unop
+ unop
+ ldq $6,48($17)
+ ldq $7,56($17)
+
+ ldl $31,320($17)
+ unop
+ unop
+ unop
+
+ /* This gives the extra cycle of aeration above the minimum. */
+ unop
+ unop
+ unop
+ unop
+
+ wh64 ($19)
+ unop
+ unop
+ unop
+
+ stq $0,0($16)
+ subq $18,1,$18
+ stq $1,8($16)
+ unop
+
+ unop
+ stq $2,16($16)
+ addq $17,64,$17
+ stq $3,24($16)
+
+ stq $4,32($16)
+ stq $5,40($16)
+ addq $19,64,$19
+ unop
+
+ stq $6,48($16)
+ stq $7,56($16)
+ addq $16,64,$16
+ bne $18, 1b
+
+ /* Prefetch the final 5 cache lines of the read stream. */
+ lda $18,10
+ ldl $31,320($17)
+ ldl $31,384($17)
+ ldl $31,448($17)
+
+ ldl $31,512($17)
+ ldl $31,576($17)
+ nop
+ nop
+
+ /* Non-prefetching, non-write-hinting cleanup loop for the
+ final 10 cache lines. */
+2: ldq $0,0($17)
+ ldq $1,8($17)
+ ldq $2,16($17)
+ ldq $3,24($17)
+
+ ldq $4,32($17)
+ ldq $5,40($17)
+ ldq $6,48($17)
+ ldq $7,56($17)
+
+ stq $0,0($16)
+ subq $18,1,$18
+ stq $1,8($16)
+ addq $17,64,$17
+
+ stq $2,16($16)
+ stq $3,24($16)
+ stq $4,32($16)
+ stq $5,40($16)
+
+ stq $6,48($16)
+ stq $7,56($16)
+ addq $16,64,$16
+ bne $18, 2b
+
+ ret
+ nop
+ unop
+ nop
+
+ .end copy_page
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
new file mode 100644
index 000000000000..db42ffe9c350
--- /dev/null
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -0,0 +1,259 @@
+/*
+ * arch/alpha/lib/ev6-copy_user.S
+ *
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Copy to/from user space, handling exceptions as we go.. This
+ * isn't exactly pretty.
+ *
+ * This is essentially the same as "memcpy()", but with a few twists.
+ * Notably, we have to make sure that $0 is always up-to-date and
+ * contains the right "bytes left to copy" value (and that it is updated
+ * only _after_ a successful copy). There is also some rather minor
+ * exception setup stuff..
+ *
+ * NOTE! This is not directly C-callable, because the calling semantics are
+ * different:
+ *
+ * Inputs:
+ * length in $0
+ * destination address in $6
+ * source address in $7
+ * return address in $28
+ *
+ * Outputs:
+ * bytes left to copy in $0
+ *
+ * Clobbers:
+ * $1,$2,$3,$4,$5,$6,$7
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ */
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EXI(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exitin-99b($31); \
+ .previous
+
+#define EXO(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exitout-99b($31); \
+ .previous
+
+ .set noat
+ .align 4
+ .globl __copy_user
+ .ent __copy_user
+ # Pipeline info: Slotting & Comments
+__copy_user:
+ .prologue 0
+ subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy?
+ beq $0, $zerolength # U .. .. .. : U L U L
+
+ and $6,7,$3 # .. .. .. E : is leading dest misalignment
+ ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data
+ beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall)
+ subq $3, 8, $3 # E .. .. .. : L U U L : trip counter
+/*
+ * The fetcher stall also hides the 1 cycle cross-cluster stall for $3 (L --> U)
+ * This loop aligns the destination a byte at a time
+ * We know we have at least one trip through this loop
+ */
+$aligndest:
+ EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores
+ addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG
+ addq $3,1,$3 # .. E .. .. :
+ nop # E .. .. .. : U L U L
+
+/*
+ * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * which allows us zero dependencies within either quadpack in the loop
+ */
+ EXO( stb $1,-1($6) ) # .. .. .. L :
+ addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG
+ subq $0,1,$0 # .. E .. .. :
+ bne $3, $aligndest # U .. .. .. : U L U L
+
+/*
+ * If we fell through into here, we have a minimum of 33 - 7 bytes
+ * If we arrived via branch, we have a minimum of 32 bytes
+ */
+$destaligned:
+ and $7,7,$1 # .. .. .. E : Check _current_ source alignment
+ bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop
+ EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code
+ beq $1,$quadaligned # U .. .. .. : U L U L
+
+/*
+ * In the worst case, we've just executed an ldq_u here from 0($7)
+ * and we'll repeat it once if we take the branch
+ */
+
+/* Misaligned quadword loop - not unrolled. Leave it that way. */
+$misquad:
+ EXI( ldq_u $2,8($7) ) # .. .. .. L :
+ subq $4,8,$4 # .. .. E .. :
+ extql $3,$7,$3 # .. U .. .. :
+ extqh $2,$7,$1 # U .. .. .. : U U L L
+
+ bis $3,$1,$1 # .. .. .. E :
+ EXO( stq $1,0($6) ) # .. .. L .. :
+ addq $7,8,$7 # .. E .. .. :
+ subq $0,8,$0 # E .. .. .. : U L L U
+
+ addq $6,8,$6 # .. .. .. E :
+ bis $2,$2,$3 # .. .. E .. :
+ nop # .. E .. .. :
+ bne $4,$misquad # U .. .. .. : U L U L
+
+ nop # .. .. .. E
+ nop # .. .. E ..
+ nop # .. E .. ..
+ beq $0,$zerolength # U .. .. .. : U L U L
+
+/* We know we have at least one trip through the byte loop */
+ EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
+ addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
+ nop # .. E .. .. :
+ br $31, $dirtyentry # L0 .. .. .. : L U U L
+/* Do the trailing byte loop load, then hop into the store part of the loop */
+
+/*
+ * A minimum of (33 - 7) bytes to do a quad at a time.
+ * Based upon the usage context, it's worth the effort to unroll this loop
+ * $0 - number of bytes to be moved
+ * $4 - number of bytes to move as quadwords
+ * $6 is current destination address
+ * $7 is current source address
+ */
+$quadaligned:
+ subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff
+ nop # .. .. E ..
+ nop # .. E .. ..
+ blt $2, $onequad # U .. .. .. : U L U L
+
+/*
+ * There is a significant assumption here that the source and destination
+ * addresses differ by more than 32 bytes. In this particular case, a
+ * sparsity of registers further bounds this to be a minimum of 8 bytes.
+ * But if this isn't met, then the output result will be incorrect.
+ * Furthermore, due to a lack of available registers, we really can't
+ * unroll this to be an 8x loop (which would enable us to use the wh64
+ * instruction memory hint instruction).
+ */
+$unroll4:
+ EXI( ldq $1,0($7) ) # .. .. .. L
+ EXI( ldq $2,8($7) ) # .. .. L ..
+ subq $4,32,$4 # .. E .. ..
+ nop # E .. .. .. : U U L L
+
+ addq $7,16,$7 # .. .. .. E
+ EXO( stq $1,0($6) ) # .. .. L ..
+ EXO( stq $2,8($6) ) # .. L .. ..
+ subq $0,16,$0 # E .. .. .. : U L L U
+
+ addq $6,16,$6 # .. .. .. E
+ EXI( ldq $1,0($7) ) # .. .. L ..
+ EXI( ldq $2,8($7) ) # .. L .. ..
+ subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip?
+
+ EXO( stq $1,0($6) ) # .. .. .. L
+ EXO( stq $2,8($6) ) # .. .. L ..
+ subq $0,16,$0 # .. E .. ..
+ addq $7,16,$7 # E .. .. .. : U L L U
+
+ nop # .. .. .. E
+ nop # .. .. E ..
+ addq $6,16,$6 # .. E .. ..
+ bgt $3,$unroll4 # U .. .. .. : U L U L
+
+ nop
+ nop
+ nop
+ beq $4, $noquads
+
+$onequad:
+ EXI( ldq $1,0($7) )
+ subq $4,8,$4
+ addq $7,8,$7
+ nop
+
+ EXO( stq $1,0($6) )
+ subq $0,8,$0
+ addq $6,8,$6
+ bne $4,$onequad
+
+$noquads:
+ nop
+ nop
+ nop
+ beq $0,$zerolength
+
+/*
+ * For small copies (or the tail of a larger copy), do a very simple byte loop.
+ * There's no point in doing a lot of complex alignment calculations to try
+ * to do quadword stuff for a small amount of data.
+ * $0 - remaining number of bytes left to copy
+ * $6 - current dest addr
+ * $7 - current source addr
+ */
+
+$onebyteloop:
+ EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
+ addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
+ nop # .. E .. .. :
+ nop # E .. .. .. : U L U L
+
+$dirtyentry:
+/*
+ * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * which allows us zero dependencies within either quadpack in the loop
+ */
+ EXO ( stb $2,-1($6) ) # .. .. .. L :
+ addq $7,1,$7 # .. .. E .. : quadpack as the load
+ subq $0,1,$0 # .. E .. .. : change count _after_ copy
+ bgt $0,$onebyteloop # U .. .. .. : U L U L
+
+$zerolength:
+$exitout: # Destination for exception recovery(?)
+ nop # .. .. .. E
+ nop # .. .. E ..
+ nop # .. E .. ..
+ ret $31,($28),1 # L0 .. .. .. : L U L U
+
+$exitin:
+
+ /* A stupid byte-by-byte zeroing of the rest of the output
+ buffer. This cures security holes by never leaving
+ random kernel data around to be copied elsewhere. */
+
+ nop
+ nop
+ nop
+ mov $0,$1
+
+$101:
+ EXO ( stb $31,0($6) ) # L
+ subq $1,1,$1 # E
+ addq $6,1,$6 # E
+ bgt $1,$101 # U
+
+ nop
+ nop
+ nop
+ ret $31,($28),1 # L0
+
+ .end __copy_user
+
diff --git a/arch/alpha/lib/ev6-csum_ipv6_magic.S b/arch/alpha/lib/ev6-csum_ipv6_magic.S
new file mode 100644
index 000000000000..de1948a69118
--- /dev/null
+++ b/arch/alpha/lib/ev6-csum_ipv6_magic.S
@@ -0,0 +1,126 @@
+/*
+ * arch/alpha/lib/ev6-csum_ipv6_magic.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * unsigned short csum_ipv6_magic(struct in6_addr *saddr,
+ * struct in6_addr *daddr,
+ * __u32 len,
+ * unsigned short proto,
+ * unsigned int csum);
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ * Determining actual stalls (other than slotting) doesn't appear to be easy to do.
+ *
+ * Swap <proto> (takes form 0xaabb)
+ * Then shift it left by 48, so result is:
+ * 0xbbaa0000 00000000
+ * Then turn it back into a sign extended 32-bit item
+ * 0xbbaa0000
+ *
+ * Swap <len> (an unsigned int) using Mike Burrows' 7-instruction sequence
+ * (we can't hide the 3-cycle latency of the unpkbw in the 6-instruction sequence)
+ * Assume input takes form 0xAABBCCDD
+ *
+ * Finally, original 'folding' approach is to split the long into 4 unsigned shorts
+ * add 4 ushorts, resulting in ushort/carry
+ * add carry bits + ushort --> ushort
+ * add carry bits + ushort --> ushort (in case the carry results in an overflow)
+ * Truncate to a ushort. (took 13 instructions)
+ * From doing some testing, using the approach in checksum.c:from64to16()
+ * results in the same outcome:
+ * split into 2 uints, add those, generating a ulong
+ * add the 3 low ushorts together, generating a uint
+ * a final add of the 2 lower ushorts
+ * truncating the result.
+ */
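+
+/*
+ * Illustrative C equivalent of that fold (an aside; x stands for the
+ * 64-bit sum accumulated in $20, and t/u are made-up names):
+ *
+ *	unsigned long t = (unsigned int)x + (x >> 32);
+ *	unsigned int u = (t & 0xffff) + ((t >> 16) & 0xffff)
+ *			 + ((t >> 32) & 0xffff);
+ *	return (unsigned short)((u & 0xffff) + (u >> 16));
+ */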
+
+ .globl csum_ipv6_magic
+ .align 4
+ .ent csum_ipv6_magic
+ .frame $30,0,$26,0
+csum_ipv6_magic:
+ .prologue 0
+
+ ldq $0,0($16) # L : Latency: 3
+ inslh $18,7,$4 # U : 0000000000AABBCC
+ ldq $1,8($16) # L : Latency: 3
+ sll $19,8,$7 # U : U L U L : 0x00000000 00aabb00
+
+ zapnot $20,15,$20 # U : zero extend incoming csum
+ ldq $2,0($17) # L : Latency: 3
+ sll $19,24,$19 # U : U L L U : 0x000000aa bb000000
+ inswl $18,3,$18 # U : 000000CCDD000000
+
+ ldq $3,8($17) # L : Latency: 3
+ bis $18,$4,$18 # E : 000000CCDDAABBCC
+ addl $19,$7,$19 # E : <sign bits>bbaabb00
+ nop # E : U L U L
+
+ addq $20,$0,$20 # E : begin summing the words
+ srl $18,16,$4 # U : 0000000000CCDDAA
+ zap $19,0x3,$19 # U : <sign bits>bbaa0000
+ nop # E : L U U L
+
+ cmpult $20,$0,$0 # E :
+ addq $20,$1,$20 # E :
+ zapnot $18,0xa,$18 # U : 00000000DD00BB00
+ zap $4,0xa,$4 # U : U U L L : 0000000000CC00AA
+
+ or $18,$4,$18 # E : 00000000DDCCBBAA
+ nop # E :
+ cmpult $20,$1,$1 # E :
+ addq $20,$2,$20 # E : U L U L
+
+ cmpult $20,$2,$2 # E :
+ addq $20,$3,$20 # E :
+ cmpult $20,$3,$3 # E : (1 cycle stall on $20)
+ addq $20,$18,$20 # E : U L U L (1 cycle stall on $20)
+
+ cmpult $20,$18,$18 # E :
+ addq $20,$19,$20 # E : (1 cycle stall on $20)
+ addq $0,$1,$0 # E : merge the carries back into the csum
+ addq $2,$3,$2 # E :
+
+ cmpult $20,$19,$19 # E :
+ addq $18,$19,$18 # E : (1 cycle stall on $19)
+ addq $0,$2,$0 # E :
+ addq $20,$18,$20 # E : U L U L :
+ /* (1 cycle stall on $18, 2 cycles on $20) */
+
+ addq $0,$20,$0 # E :
+ zapnot $0,15,$1 # U : Start folding output (1 cycle stall on $0)
+ nop # E :
+ srl $0,32,$0 # U : U L U L : (1 cycle stall on $0)
+
+ addq $1,$0,$1 # E : Finished generating ulong
+ extwl $1,2,$2 # U : ushort[1] (1 cycle stall on $1)
+ zapnot $1,3,$0 # U : ushort[0] (1 cycle stall on $1)
+ extwl $1,4,$1 # U : ushort[2] (1 cycle stall on $1)
+
+ addq $0,$2,$0 # E
+ addq $0,$1,$3 # E : Finished generating uint
+ /* (1 cycle stall on $0) */
+ extwl $3,2,$1 # U : ushort[1] (1 cycle stall on $3)
+ nop # E : L U L U
+
+ addq $1,$3,$0 # E : Final carry
+ not $0,$4 # E : complement (1 cycle stall on $0)
+ zapnot $4,3,$0 # U : clear upper garbage bits
+ /* (1 cycle stall on $4) */
+ ret # L0 : L U L U
+
+ .end csum_ipv6_magic
diff --git a/arch/alpha/lib/ev6-divide.S b/arch/alpha/lib/ev6-divide.S
new file mode 100644
index 000000000000..2a82b9be93fa
--- /dev/null
+++ b/arch/alpha/lib/ev6-divide.S
@@ -0,0 +1,259 @@
+/*
+ * arch/alpha/lib/ev6-divide.S
+ *
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Alpha division..
+ */
+
+/*
+ * The alpha chip doesn't provide hardware division, so we have to do it
+ * by hand. The compiler expects the functions
+ *
+ * __divqu: 64-bit unsigned long divide
+ * __remqu: 64-bit unsigned long remainder
+ * __divqs/__remqs: signed 64-bit
+ * __divlu/__remlu: unsigned 32-bit
+ * __divls/__remls: signed 32-bit
+ *
+ * These are not normal C functions: instead of the normal
+ * calling sequence, these expect their arguments in registers
+ * $24 and $25, and return the result in $27. Register $28 may
+ * be clobbered (assembly temporary), anything else must be saved.
+ *
+ * In short: painful.
+ *
+ * This is a rather simple bit-at-a-time algorithm: it's very good
+ * at dividing random 64-bit numbers, but the more usual case where
+ * the divisor is small is handled better by the DEC algorithm
+ * using lookup tables. This uses much less memory, though, and is
+ * nicer on the cache.. Besides, I don't know the copyright status
+ * of the DEC code.
+ */
+
+/*
+ * My temporaries:
+ * $0 - current bit
+ * $1 - shifted divisor
+ * $2 - modulus/quotient
+ *
+ * $23 - return address
+ * $24 - dividend
+ * $25 - divisor
+ *
+ * $27 - quotient/modulus
+ * $28 - compare status
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ */
+
+#define halt .long 0
+
+/*
+ * Select function type and registers
+ */
+#define mask $0
+#define divisor $1
+#define compare $28
+#define tmp1 $3
+#define tmp2 $4
+
+#ifdef DIV
+#define DIV_ONLY(x,y...) x,##y
+#define MOD_ONLY(x,y...)
+#define func(x) __div##x
+#define modulus $2
+#define quotient $27
+#define GETSIGN(x) xor $24,$25,x
+#define STACK 48
+#else
+#define DIV_ONLY(x,y...)
+#define MOD_ONLY(x,y...) x,##y
+#define func(x) __rem##x
+#define modulus $27
+#define quotient $2
+#define GETSIGN(x) bis $24,$24,x
+#define STACK 32
+#endif
+
+/*
+ * For 32-bit operations, we need to extend to 64-bit
+ */
+#ifdef INTSIZE
+#define ufunction func(lu)
+#define sfunction func(l)
+#define LONGIFY(x) zapnot x,15,x
+#define SLONGIFY(x) addl x,0,x
+#else
+#define ufunction func(qu)
+#define sfunction func(q)
+#define LONGIFY(x)
+#define SLONGIFY(x)
+#endif
+
+.set noat
+.align 4
+.globl ufunction
+.ent ufunction
+ufunction:
+ subq $30,STACK,$30 # E :
+ .frame $30,STACK,$23
+ .prologue 0
+
+7: stq $1, 0($30) # L :
+ bis $25,$25,divisor # E :
+ stq $2, 8($30) # L : L U L U
+
+ bis $24,$24,modulus # E :
+ stq $0,16($30) # L :
+ bis $31,$31,quotient # E :
+ LONGIFY(divisor) # E : U L L U
+
+ stq tmp1,24($30) # L :
+ LONGIFY(modulus) # E :
+ bis $31,1,mask # E :
+ DIV_ONLY(stq tmp2,32($30)) # L : L U U L
+
+ beq divisor, 9f /* div by zero */
+ /*
+ * In spite of the DIV_ONLY being either a non-instruction
+ * or an actual stq, the addition of the .align directive
+ * below ensures that label 1 is going to be nicely aligned
+ */
+
+ .align 4
+#ifdef INTSIZE
+ /*
+ * shift divisor left, using 3-bit shifts for
+ * 32-bit divides as we can't overflow. Three-bit
+ * shifts will result in looping three times less
+ * here, but can result in two loops more later.
+ * Thus using a large shift isn't worth it (and
+ * s8add pairs better than a sll..)
+ */
+1: cmpult divisor,modulus,compare # E :
+ s8addq divisor,$31,divisor # E :
+ s8addq mask,$31,mask # E :
+ bne compare,1b # U : U L U L
+#else
+1: cmpult divisor,modulus,compare # E :
+ nop # E :
+ nop # E :
+ blt divisor, 2f # U : U L U L
+
+ addq divisor,divisor,divisor # E :
+ addq mask,mask,mask # E :
+ unop # E :
+ bne compare,1b # U : U L U L
+#endif
+
+ /* ok, start to go right again.. */
+2:
+ /*
+ * Keep things nicely bundled... use a nop instead of not
+ * having an instruction for DIV_ONLY
+ */
+#ifdef DIV
+ DIV_ONLY(addq quotient,mask,tmp2) # E :
+#else
+ nop # E :
+#endif
+ srl mask,1,mask # U :
+ cmpule divisor,modulus,compare # E :
+ subq modulus,divisor,tmp1 # E :
+
+#ifdef DIV
+ DIV_ONLY(cmovne compare,tmp2,quotient) # E : Latency 2, extra map slot
+ nop # E : as part of the cmovne
+ srl divisor,1,divisor # U :
+ nop # E : L U L U
+
+ nop # E :
+ cmovne compare,tmp1,modulus # E : Latency 2, extra map slot
+ nop # E : as part of the cmovne
+ bne mask,2b # U : U L U L
+#else
+ srl divisor,1,divisor # U :
+ cmovne compare,tmp1,modulus # E : Latency 2, extra map slot
+ nop # E : as part of the cmovne
+ bne mask,2b # U : U L L U
+#endif
+
+9: ldq $1, 0($30) # L :
+ ldq $2, 8($30) # L :
+ nop # E :
+ nop # E : U U L L
+
+ ldq $0,16($30) # L :
+ ldq tmp1,24($30) # L :
+ nop # E :
+ nop # E :
+
+#ifdef DIV
+ DIV_ONLY(ldq tmp2,32($30)) # L :
+#else
+ nop # E :
+#endif
+ addq $30,STACK,$30 # E :
+ ret $31,($23),1 # L0 : L U U L
+ .end ufunction
+
+/*
+ * Uhh.. Ugly signed division. I'd rather not have it at all, but
+ * it's needed in some circumstances. There are different ways to
+ * handle this, really. This does:
+ * -a / b = a / -b = -(a / b)
+ * -a % b = -(a % b)
+ * a % -b = a % b
+ * which is probably not the best solution, but at least should
+ * have the property that (x/y)*y + (x%y) = x.
+ */
+.align 4
+.globl sfunction
+.ent sfunction
+sfunction:
+ subq $30,STACK,$30 # E :
+ .frame $30,STACK,$23
+ .prologue 0
+ bis $24,$25,$28 # E :
+ SLONGIFY($28) # E :
+ bge $28,7b # U :
+
+ stq $24,0($30) # L :
+ subq $31,$24,$28 # E :
+ stq $25,8($30) # L :
+ nop # E : U L U L
+
+ cmovlt $24,$28,$24 /* abs($24) */ # E : Latency 2, extra map slot
+ nop # E : as part of the cmov
+ stq $23,16($30) # L :
+ subq $31,$25,$28 # E : U L U L
+
+ stq tmp1,24($30) # L :
+ cmovlt $25,$28,$25 /* abs($25) */ # E : Latency 2, extra map slot
+ nop # E :
+ bsr $23,ufunction # L0: L U L U
+
+ ldq $24,0($30) # L :
+ ldq $25,8($30) # L :
+ GETSIGN($28) # E :
+ subq $31,$27,tmp1 # E : U U L L
+
+ SLONGIFY($28) # E :
+ ldq $23,16($30) # L :
+ cmovlt $28,tmp1,$27 # E : Latency 2, extra map slot
+ nop # E : U L L U : as part of the cmov
+
+ ldq tmp1,24($30) # L :
+ nop # E : as part of the cmov
+ addq $30,STACK,$30 # E :
+ ret $31,($23),1 # L0 : L U U L
+ .end sfunction
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
new file mode 100644
index 000000000000..a8e843dbcc23
--- /dev/null
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -0,0 +1,191 @@
+/*
+ * arch/alpha/lib/ev6-memchr.S
+ *
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Finds characters in a memory area. Optimized for the Alpha:
+ *
+ * - memory accessed as aligned quadwords only
+ * - uses cmpbge to compare 8 bytes in parallel
+ * - does binary search to find 0 byte in last
+ * quadword (HAKMEM needed 12 instructions to
+ * do this instead of the 9 instructions that
+ * binary search needs).
+ *
+ * For correctness consider that:
+ *
+ * - only minimum number of quadwords may be accessed
+ * - the third argument is an unsigned long
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ */
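+
+/*
+ * The cmpbge idiom, restated (illustrative): the sought byte is
+ * replicated into all eight byte lanes and xor'ed with each quadword,
+ * so matching bytes become zero; "cmpbge $31,x,m" sets bit i of m iff
+ * byte i of x is zero, hence a nonzero mask marks a hit and its lowest
+ * set bit gives the byte's position.
+ */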
+
+ .set noreorder
+ .set noat
+
+ .align 4
+ .globl memchr
+ .ent memchr
+memchr:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ # Hack -- if someone passes in (size_t)-1, hoping to just
+ # search til the end of the address space, we will overflow
+ # below when we find the address of the last byte. Given
+ # that we will never have a 56-bit address space, cropping
+ # the length is the easiest way to avoid trouble.
+ zap $18, 0x80, $5 # U : Bound length
+ beq $18, $not_found # U :
+ ldq_u $1, 0($16) # L : load first quadword Latency=3
+ and $17, 0xff, $17 # E : L L U U : 00000000000000ch
+
+ insbl $17, 1, $2 # U : 000000000000ch00
+ cmpult $18, 9, $4 # E : small (< 1 quad) string?
+ or $2, $17, $17 # E : 000000000000chch
+ lda $3, -1($31) # E : U L L U
+
+ sll $17, 16, $2 # U : 00000000chch0000
+ addq $16, $5, $5 # E : Max search address
+ or $2, $17, $17 # E : 00000000chchchch
+ sll $17, 32, $2 # U : U L L U : chchchch00000000
+
+ or $2, $17, $17 # E : chchchchchchchch
+ extql $1, $16, $7 # U : $7 is upper bits
+ beq $4, $first_quad # U :
+ ldq_u $6, -1($5) # L : L U U L : eight or less bytes to search Latency=3
+
+ extqh $6, $16, $6 # U : 2 cycle stall for $6
+ mov $16, $0 # E :
+ nop # E :
+ or $7, $6, $1 # E : L U L U $1 = quadword starting at $16
+
+ # Deal with the case where at most 8 bytes remain to be searched
+ # in $1. E.g.:
+ # $18 = 6
+ # $1 = ????c6c5c4c3c2c1
+$last_quad:
+ negq $18, $6 # E :
+ xor $17, $1, $1 # E :
+ srl $3, $6, $6 # U : $6 = mask of $18 bits set
+ cmpbge $31, $1, $2 # E : L U L U
+
+ nop
+ nop
+ and $2, $6, $2 # E :
+ beq $2, $not_found # U : U L U L
+
+$found_it:
+#if defined(__alpha_fix__) && defined(__alpha_cix__)
+ /*
+ * Since we are guaranteed to have set one of the bits, we don't
+ * have to worry about coming back with a 0x40 out of cttz...
+ */
+ cttz $2, $3 # U0 :
+ addq $0, $3, $0 # E : All done
+ nop # E :
+ ret # L0 : L U L U
+#else
+ /*
+ * Slow and clunky. It can probably be improved.
+ * An exercise left for others.
+ */
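+	/*
+	 * negq/and isolate the lowest set bit of the cmpbge mask in $2;
+	 * the 0x0f, 0x33 and 0x55 tests then binary-search its position,
+	 * conditionally adding 4, 2 and 1 to the base address in $0.
+	 */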
+ negq $2, $3 # E :
+ and $2, $3, $2 # E :
+ and $2, 0x0f, $1 # E :
+ addq $0, 4, $3 # E :
+
+ cmoveq $1, $3, $0 # E : Latency 2, extra map cycle
+ nop # E : keep with cmov
+ and $2, 0x33, $1 # E :
+ addq $0, 2, $3 # E : U L U L : 2 cycle stall on $0
+
+ cmoveq $1, $3, $0 # E : Latency 2, extra map cycle
+ nop # E : keep with cmov
+ and $2, 0x55, $1 # E :
+ addq $0, 1, $3 # E : U L U L : 2 cycle stall on $0
+
+ cmoveq $1, $3, $0 # E : Latency 2, extra map cycle
+ nop
+ nop
+ ret # L0 : L U L U
+#endif
+
+ # Deal with the case where $18 > 8 bytes remain to be
+ # searched. $16 may not be aligned.
+ .align 4
+$first_quad:
+ andnot $16, 0x7, $0 # E :
+ insqh $3, $16, $2 # U : $2 = 0000ffffffffffff ($16<0:2> ff)
+ xor $1, $17, $1 # E :
+ or $1, $2, $1 # E : U L U L $1 = ====ffffffffffff
+
+ cmpbge $31, $1, $2 # E :
+ bne $2, $found_it # U :
+ # At least one byte left to process.
+ ldq $1, 8($0) # L :
+ subq $5, 1, $18 # E : U L U L
+
+ addq $0, 8, $0 # E :
+ # Make $18 point to last quad to be accessed (the
+ # last quad may or may not be partial).
+ andnot $18, 0x7, $18 # E :
+ cmpult $0, $18, $2 # E :
+ beq $2, $final # U : U L U L
+
+ # At least two quads remain to be accessed.
+
+ subq $18, $0, $4 # E : $4 <- nr quads to be processed
+ and $4, 8, $4 # E : odd number of quads?
+ bne $4, $odd_quad_count # U :
+ # At least three quads remain to be accessed
+ mov $1, $4 # E : L U L U : move prefetched value to correct reg
+
+ .align 4
+$unrolled_loop:
+ ldq $1, 8($0) # L : prefetch $1
+ xor $17, $4, $2 # E :
+ cmpbge $31, $2, $2 # E :
+ bne $2, $found_it # U : U L U L
+
+ addq $0, 8, $0 # E :
+ nop # E :
+ nop # E :
+ nop # E :
+
+$odd_quad_count:
+ xor $17, $1, $2 # E :
+ ldq $4, 8($0) # L : prefetch $4
+ cmpbge $31, $2, $2 # E :
+ addq $0, 8, $6 # E :
+
+ bne $2, $found_it # U :
+ cmpult $6, $18, $6 # E :
+ addq $0, 8, $0 # E :
+ nop # E :
+
+ bne $6, $unrolled_loop # U :
+ mov $4, $1 # E : move prefetched value into $1
+ nop # E :
+ nop # E :
+
+$final: subq $5, $0, $18 # E : $18 <- number of bytes left to do
+ nop # E :
+ nop # E :
+ bne $18, $last_quad # U :
+
+$not_found:
+ mov $31, $0 # E :
+ nop # E :
+ nop # E :
+ ret # L0 :
+
+ .end memchr
diff --git a/arch/alpha/lib/ev6-memcpy.S b/arch/alpha/lib/ev6-memcpy.S
new file mode 100644
index 000000000000..52b37b0f2af5
--- /dev/null
+++ b/arch/alpha/lib/ev6-memcpy.S
@@ -0,0 +1,248 @@
+/*
+ * arch/alpha/lib/ev6-memcpy.S
+ * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Reasonably optimized memcpy() routine for the Alpha 21264
+ *
+ * - memory accessed as aligned quadwords only
+ * - unrolled to copy 64 bytes per trip, with wh64 write hints
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ *
+ * Temp usage notes:
+ * $1,$2, - scratch
+ */
+
+ .set noreorder
+ .set noat
+
+ .align 4
+ .globl memcpy
+ .ent memcpy
+memcpy:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ mov $16, $0 # E : copy dest to return
+ ble $18, $nomoredata # U : done with the copy?
+ xor $16, $17, $1 # E : are source and dest alignments the same?
+ and $1, 7, $1 # E : are they the same mod 8?
+
+ bne $1, $misaligned # U : Nope - gotta do this the slow way
+ /* source and dest are same mod 8 address */
+ and $16, 7, $1 # E : Are both 0mod8?
+ beq $1, $both_0mod8 # U : Yes
+ nop # E :
+
+ /*
+ * source and dest are same misalignment. move a byte at a time
+ * until a 0mod8 alignment for both is reached.
+ * At least one byte more to move
+ */
+
+$head_align:
+ ldbu $1, 0($17) # L : grab a byte
+ subq $18, 1, $18 # E : count--
+ addq $17, 1, $17 # E : src++
+ stb $1, 0($16) # L :
+ addq $16, 1, $16 # E : dest++
+ and $16, 7, $1 # E : Are we at 0mod8 yet?
+ ble $18, $nomoredata # U : done with the copy?
+ bne $1, $head_align # U :
+
+$both_0mod8:
+ cmple $18, 127, $1 # E : Can we unroll the loop?
+ bne $1, $no_unroll # U :
+ and $16, 63, $1 # E : get mod64 alignment
+ beq $1, $do_unroll # U : no single quads to fiddle
+
+$single_head_quad:
+ ldq $1, 0($17) # L : get 8 bytes
+ subq $18, 8, $18 # E : count -= 8
+ addq $17, 8, $17 # E : src += 8
+ nop # E :
+
+ stq $1, 0($16) # L : store
+ addq $16, 8, $16 # E : dest += 8
+ and $16, 63, $1 # E : get mod64 alignment
+ bne $1, $single_head_quad # U : still not fully aligned
+
+$do_unroll:
+ addq $16, 64, $7 # E : Initial (+1 trip) wh64 address
+ cmple $18, 127, $1 # E : Can we go through the unrolled loop?
+ bne $1, $tail_quads # U : Nope
+ nop # E :
+
+$unroll_body:
+ wh64 ($7) # L1 : memory subsystem hint: 64 bytes at
+ # ($7) are about to be over-written
+ ldq $6, 0($17) # L0 : bytes 0..7
+ nop # E :
+ nop # E :
+
+ ldq $4, 8($17) # L : bytes 8..15
+ ldq $5, 16($17) # L : bytes 16..23
+ addq $7, 64, $7 # E : Update next wh64 address
+ nop # E :
+
+ ldq $3, 24($17) # L : bytes 24..31
+ addq $16, 64, $1 # E : fallback value for wh64
+ nop # E :
+ nop # E :
+
+ addq $17, 32, $17 # E : src += 32 bytes
+ stq $6, 0($16) # L : bytes 0..7
+ nop # E :
+ nop # E :
+
+ stq $4, 8($16) # L : bytes 8..15
+ stq $5, 16($16) # L : bytes 16..23
+ subq $18, 192, $2 # E : At least two more trips to go?
+ nop # E :
+
+ stq $3, 24($16) # L : bytes 24..31
+ addq $16, 32, $16 # E : dest += 32 bytes
+ nop # E :
+ nop # E :
+
+ ldq $6, 0($17) # L : bytes 0..7
+ ldq $4, 8($17) # L : bytes 8..15
+ cmovlt $2, $1, $7 # E : Latency 2, extra map slot - Use
+ # fallback wh64 address if < 2 more trips
+ nop # E :
+
+ ldq $5, 16($17) # L : bytes 16..23
+ ldq $3, 24($17) # L : bytes 24..31
+ addq $16, 32, $16 # E : dest += 32
+ subq $18, 64, $18 # E : count -= 64
+
+ addq $17, 32, $17 # E : src += 32
+ stq $6, -32($16) # L : bytes 0..7
+ stq $4, -24($16) # L : bytes 8..15
+ cmple $18, 63, $1 # E : At least one more trip?
+
+ stq $5, -16($16) # L : bytes 16..23
+ stq $3, -8($16) # L : bytes 24..31
+ nop # E :
+ beq $1, $unroll_body
+
+$tail_quads:
+$no_unroll:
+ .align 4
+ subq $18, 8, $18 # E : At least a quad left?
+ blt $18, $less_than_8 # U : Nope
+ nop # E :
+ nop # E :
+
+$move_a_quad:
+ ldq $1, 0($17) # L : fetch 8
+ subq $18, 8, $18 # E : count -= 8
+ addq $17, 8, $17 # E : src += 8
+ nop # E :
+
+ stq $1, 0($16) # L : store 8
+ addq $16, 8, $16 # E : dest += 8
+ bge $18, $move_a_quad # U :
+ nop # E :
+
+$less_than_8:
+ .align 4
+ addq $18, 8, $18 # E : add back for trailing bytes
+ ble $18, $nomoredata # U : All-done
+ nop # E :
+ nop # E :
+
+ /* Trailing bytes */
+$tail_bytes:
+ subq $18, 1, $18 # E : count--
+ ldbu $1, 0($17) # L : fetch a byte
+ addq $17, 1, $17 # E : src++
+ nop # E :
+
+ stb $1, 0($16) # L : store a byte
+ addq $16, 1, $16 # E : dest++
+ bgt $18, $tail_bytes # U : more to be done?
+ nop # E :
+
+ /* branching to exit takes 3 extra cycles, so replicate exit here */
+ ret $31, ($26), 1 # L0 :
+ nop # E :
+ nop # E :
+ nop # E :
+
+$misaligned:
+ mov $0, $4 # E : dest temp
+ and $0, 7, $1 # E : dest alignment mod8
+ beq $1, $dest_0mod8 # U : life doesn't totally suck
+ nop
+
+$aligndest:
+ ble $18, $nomoredata # U :
+ ldbu $1, 0($17) # L : fetch a byte
+ subq $18, 1, $18 # E : count--
+ addq $17, 1, $17 # E : src++
+
+ stb $1, 0($4) # L : store it
+ addq $4, 1, $4 # E : dest++
+ and $4, 7, $1 # E : dest 0mod8 yet?
+ bne $1, $aligndest # U : go until we are aligned.
+
+ /* Source has unknown alignment, but dest is known to be 0mod8 */
+$dest_0mod8:
+ subq $18, 8, $18 # E : At least a quad left?
+ blt $18, $misalign_tail # U : Nope
+ ldq_u $3, 0($17) # L : seed (rotating load) of 8 bytes
+ nop # E :
+
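+ /*
+ * Standard Alpha unaligned-copy idiom: keep the previous unaligned
+ * quad in $3, fetch the next one, and let extql/extqh shift the two
+ * halves into place so bis can merge them into one aligned store.
+ */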
+$mis_quad:
+ ldq_u $16, 8($17) # L : Fetch next 8
+ extql $3, $17, $3 # U : masking
+ extqh $16, $17, $1 # U : masking
+ bis $3, $1, $1 # E : merged bytes to store
+
+ subq $18, 8, $18 # E : count -= 8
+ addq $17, 8, $17 # E : src += 8
+ stq $1, 0($4) # L : store 8 (aligned)
+ mov $16, $3 # E : "rotate" source data
+
+ addq $4, 8, $4 # E : dest += 8
+ bge $18, $mis_quad # U : More quads to move
+ nop
+ nop
+
+$misalign_tail:
+ addq $18, 8, $18 # E : account for tail stuff
+ ble $18, $nomoredata # U :
+ nop
+ nop
+
+$misalign_byte:
+ ldbu $1, 0($17) # L : fetch 1
+ subq $18, 1, $18 # E : count--
+ addq $17, 1, $17 # E : src++
+ nop # E :
+
+ stb $1, 0($4) # L : store
+ addq $4, 1, $4 # E : dest++
+ bgt $18, $misalign_byte # U : more to go?
+ nop
+
+
+$nomoredata:
+ ret $31, ($26), 1 # L0 :
+ nop # E :
+ nop # E :
+ nop # E :
+
+ .end memcpy
+
+/* For backwards module compatibility. */
+__memcpy = memcpy
+.globl __memcpy
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
new file mode 100644
index 000000000000..d8b94e1c7fca
--- /dev/null
+++ b/arch/alpha/lib/ev6-memset.S
@@ -0,0 +1,597 @@
+/*
+ * arch/alpha/lib/ev6-memset.S
+ *
+ * This is an efficient (and relatively small) implementation of the C library
+ * "memset()" function for the 21264 implementation of Alpha.
+ *
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * The algorithm for the leading and trailing quadwords remains the same,
+ * however the loop has been unrolled to enable better memory throughput,
+ * and the code has been replicated for each of the entry points: __memset
+ * and __memsetw to permit better scheduling to eliminate the stalling
+ * encountered during the mask replication.
+ * A future enhancement might be to put in a byte store loop for really
+ * small (say < 32 bytes) memset()s. Whether or not that change would be
+ * a win in the kernel would depend upon the contextual usage.
+ * WARNING: Maintaining this is going to be more work than the above version,
+ * as fixes will need to be made in multiple places. The performance gain
+ * is worth it.
+ */
+
+ .set noat
+ .set noreorder
+.text
+ .globl __memset
+ .globl __memsetw
+ .globl __constant_c_memset
+ .globl memset
+
+ .ent __memset
+.align 5
+__memset:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ /*
+ * Serious stalling happens. The only way to mitigate this is to
+ * undertake a major re-write to interleave the constant materialization
+ * with other parts of the fall-through code. This is important, even
+ * though it makes maintenance tougher.
+ * Do this later.
+ */
+ and $17,255,$1 # E : 00000000000000ch
+ insbl $17,1,$2 # U : 000000000000ch00
+ bis $16,$16,$0 # E : return value
+ ble $18,end_b # U : zero length requested?
+
+ addq $18,$16,$6 # E : max address to write to
+ bis $1,$2,$17 # E : 000000000000chch
+ insbl $1,2,$3 # U : 0000000000ch0000
+ insbl $1,3,$4 # U : 00000000ch000000
+
+ or $3,$4,$3 # E : 00000000chch0000
+ inswl $17,4,$5 # U : 0000chch00000000
+ xor $16,$6,$1 # E : will complete write be within one quadword?
+ inswl $17,6,$2 # U : chch000000000000
+
+ or $17,$3,$17 # E : 00000000chchchch
+ or $2,$5,$2 # E : chchchch00000000
+ bic $1,7,$1 # E : fit within a single quadword?
+ and $16,7,$3 # E : Target addr misalignment
+
+ or $17,$2,$17 # E : chchchchchchchch
+ beq $1,within_quad_b # U :
+ nop # E :
+ beq $3,aligned_b # U : target is 0mod8
+
+ /*
+ * Target address is misaligned, and won't fit within a quadword
+ */
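+ /*
+ * Head splice, read-modify-write: mskql keeps the original bytes
+ * below the store offset, insql shifts the fill pattern up into the
+ * rest of the quad, and the two halves are or-ed back together
+ * before the store.
+ */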
+ ldq_u $4,0($16) # L : Fetch first partial
+ bis $16,$16,$5 # E : Save the address
+ insql $17,$16,$2 # U : Insert new bytes
+ subq $3,8,$3 # E : Invert (for addressing uses)
+
+ addq $18,$3,$18 # E : $18 is new count ($3 is negative)
+ mskql $4,$16,$4 # U : clear relevant parts of the quad
+ subq $16,$3,$16 # E : $16 is new aligned destination
+ bis $2,$4,$1 # E : Final bytes
+
+ nop
+ stq_u $1,0($5) # L : Store result
+ nop
+ nop
+
+.align 4
+aligned_b:
+ /*
+ * We are now guaranteed to be quad aligned, with at least
+ * one partial quad to write.
+ */
+
+ sra $18,3,$3 # U : Number of remaining quads to write
+ and $18,7,$18 # E : Number of trailing bytes to write
+ bis $16,$16,$5 # E : Save dest address
+ beq $3,no_quad_b # U : tail stuff only
+
+ /*
+ * it's worth the effort to unroll this and use wh64 if possible
+ * Lifted a bunch of code from clear_user.S
+ * At this point, entry values are:
+ * $16 Current destination address
+ * $5 A copy of $16
+ * $6 The max quadword address to write to
+ * $18 Number trailer bytes
+ * $3 Number quads to write
+ */
+
+ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop)
+ subq $3, 16, $4 # E : Only try to unroll if > 128 bytes
+ subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64)
+ blt $4, loop_b # U :
+
+ /*
+ * We know we've got at least 16 quads, minimum of one trip
+ * through unrolled loop. Do a quad at a time to get us 0mod64
+ * aligned.
+ */
+
+ nop # E :
+ nop # E :
+ nop # E :
+ beq $1, $bigalign_b # U :
+
+$alignmod64_b:
+ stq $17, 0($5) # L :
+ subq $3, 1, $3 # E : For consistency later
+ addq $1, 8, $1 # E : Increment towards zero for alignment
+ addq $5, 8, $4 # E : Initial wh64 address (filler instruction)
+
+ nop
+ nop
+ addq $5, 8, $5 # E : Inc address
+ blt $1, $alignmod64_b # U :
+
+$bigalign_b:
+ /*
+ * $3 - number quads left to go
+ * $5 - target address (aligned 0mod64)
+ * $17 - mask of stuff to store
+ * Scratch registers available: $7, $2, $4, $1
+ * we know that we'll be taking a minimum of one trip through the loop.
+ * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
+ * Assumes the wh64 should target the trip two iterations ahead.
+ * The wh64 is issued for the starting destination address of trip +2
+ * through the loop; if fewer than two trips remain, the target
+ * address falls back to the current trip.
+ */
+
+$do_wh64_b:
+ wh64 ($4) # L1 : memory subsystem write hint
+ subq $3, 24, $2 # E : For determining future wh64 addresses
+ stq $17, 0($5) # L :
+ nop # E :
+
+ addq $5, 128, $4 # E : speculative target of next wh64
+ stq $17, 8($5) # L :
+ stq $17, 16($5) # L :
+ addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr)
+
+ stq $17, 24($5) # L :
+ stq $17, 32($5) # L :
+ cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle
+ nop
+
+ stq $17, 40($5) # L :
+ stq $17, 48($5) # L :
+ subq $3, 16, $2 # E : Repeat the loop at least once more?
+ nop
+
+ stq $17, 56($5) # L :
+ addq $5, 64, $5 # E :
+ subq $3, 8, $3 # E :
+ bge $2, $do_wh64_b # U :
+
+ nop
+ nop
+ nop
+ beq $3, no_quad_b # U : Might have finished already
+
+.align 4
+ /*
+ * Simple loop for trailing quadwords, or for small amounts
+ * of data (where we can't use an unrolled loop and wh64)
+ */
+loop_b:
+ stq $17,0($5) # L :
+ subq $3,1,$3 # E : Decrement number quads left
+ addq $5,8,$5 # E : Inc address
+ bne $3,loop_b # U : more?
+
+no_quad_b:
+ /*
+ * Write 0..7 trailing bytes.
+ */
+ nop # E :
+ beq $18,end_b # U : All done?
+ ldq $7,0($5) # L :
+ mskqh $7,$6,$2 # U : Mask final quad
+
+ insqh $17,$6,$4 # U : New bits
+ bis $2,$4,$1 # E : Put it all together
+ stq $1,0($5) # L : And back to memory
+ ret $31,($26),1 # L0 :
+
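+ /*
+ * Whole store fits inside one quadword: splice the pattern into the
+ * loaded quad with insql/mskql, then mskql/mskqh restore the
+ * original bytes at and beyond the end address before the store.
+ */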
+within_quad_b:
+ ldq_u $1,0($16) # L :
+ insql $17,$16,$2 # U : New bits
+ mskql $1,$16,$4 # U : Clear old
+ bis $2,$4,$2 # E : New result
+
+ mskql $2,$6,$4 # U :
+ mskqh $1,$6,$2 # U :
+ bis $2,$4,$1 # E :
+ stq_u $1,0($16) # L :
+
+end_b:
+ nop
+ nop
+ nop
+ ret $31,($26),1 # L0 :
+ .end __memset
+
+ /*
+ * This is the original body of code, prior to replication and
+ * rescheduling. Leave it here, as there may be calls to this
+ * entry point.
+ */
+.align 4
+ .ent __constant_c_memset
+__constant_c_memset:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ addq $18,$16,$6 # E : max address to write to
+ bis $16,$16,$0 # E : return value
+ xor $16,$6,$1 # E : will complete write be within one quadword?
+ ble $18,end # U : zero length requested?
+
+ bic $1,7,$1 # E : fit within a single quadword
+ beq $1,within_one_quad # U :
+ and $16,7,$3 # E : Target addr misalignment
+ beq $3,aligned # U : target is 0mod8
+
+ /*
+ * Target address is misaligned, and won't fit within a quadword
+ */
+ ldq_u $4,0($16) # L : Fetch first partial
+ bis $16,$16,$5 # E : Save the address
+ insql $17,$16,$2 # U : Insert new bytes
+ subq $3,8,$3 # E : Invert (for addressing uses)
+
+ addq $18,$3,$18 # E : $18 is new count ($3 is negative)
+ mskql $4,$16,$4 # U : clear relevant parts of the quad
+ subq $16,$3,$16 # E : $16 is new aligned destination
+ bis $2,$4,$1 # E : Final bytes
+
+ nop
+ stq_u $1,0($5) # L : Store result
+ nop
+ nop
+
+.align 4
+aligned:
+ /*
+ * We are now guaranteed to be quad aligned, with at least
+ * one partial quad to write.
+ */
+
+ sra $18,3,$3 # U : Number of remaining quads to write
+ and $18,7,$18 # E : Number of trailing bytes to write
+ bis $16,$16,$5 # E : Save dest address
+ beq $3,no_quad # U : tail stuff only
+
+ /*
+ * it's worth the effort to unroll this and use wh64 if possible
+ * Lifted a bunch of code from clear_user.S
+ * At this point, entry values are:
+ * $16 Current destination address
+ * $5 A copy of $16
+ * $6 The max quadword address to write to
+ * $18 Number trailer bytes
+ * $3 Number quads to write
+ */
+
+ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop)
+ subq $3, 16, $4 # E : Only try to unroll if > 128 bytes
+ subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64)
+ blt $4, loop # U :
+
+ /*
+ * We know we've got at least 16 quads, minimum of one trip
+ * through unrolled loop. Do a quad at a time to get us 0mod64
+ * aligned.
+ */
+
+ nop # E :
+ nop # E :
+ nop # E :
+ beq $1, $bigalign # U :
+
+$alignmod64:
+ stq $17, 0($5) # L :
+ subq $3, 1, $3 # E : For consistency later
+ addq $1, 8, $1 # E : Increment towards zero for alignment
+ addq $5, 8, $4 # E : Initial wh64 address (filler instruction)
+
+ nop
+ nop
+ addq $5, 8, $5 # E : Inc address
+ blt $1, $alignmod64 # U :
+
+$bigalign:
+ /*
+ * $3 - number quads left to go
+ * $5 - target address (aligned 0mod64)
+ * $17 - mask of stuff to store
+ * Scratch registers available: $7, $2, $4, $1
+ * we know that we'll be taking a minimum of one trip through the loop.
+ * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
+ * Assumes the wh64 should target the trip two iterations ahead.
+ * The wh64 is issued for the starting destination address of trip +2
+ * through the loop; if fewer than two trips remain, the target
+ * address falls back to the current trip.
+ */
+
+$do_wh64:
+ wh64 ($4) # L1 : memory subsystem write hint
+ subq $3, 24, $2 # E : For determining future wh64 addresses
+ stq $17, 0($5) # L :
+ nop # E :
+
+ addq $5, 128, $4 # E : speculative target of next wh64
+ stq $17, 8($5) # L :
+ stq $17, 16($5) # L :
+ addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr)
+
+ stq $17, 24($5) # L :
+ stq $17, 32($5) # L :
+ cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle
+ nop
+
+ stq $17, 40($5) # L :
+ stq $17, 48($5) # L :
+ subq $3, 16, $2 # E : Repeat the loop at least once more?
+ nop
+
+ stq $17, 56($5) # L :
+ addq $5, 64, $5 # E :
+ subq $3, 8, $3 # E :
+ bge $2, $do_wh64 # U :
+
+ nop
+ nop
+ nop
+ beq $3, no_quad # U : Might have finished already
+
+.align 4
+ /*
+ * Simple loop for trailing quadwords, or for small amounts
+ * of data (where we can't use an unrolled loop and wh64)
+ */
+loop:
+ stq $17,0($5) # L :
+ subq $3,1,$3 # E : Decrement number quads left
+ addq $5,8,$5 # E : Inc address
+ bne $3,loop # U : more?
+
+no_quad:
+ /*
+ * Write 0..7 trailing bytes.
+ */
+ nop # E :
+ beq $18,end # U : All done?
+ ldq $7,0($5) # L :
+ mskqh $7,$6,$2 # U : Mask final quad
+
+ insqh $17,$6,$4 # U : New bits
+ bis $2,$4,$1 # E : Put it all together
+ stq $1,0($5) # L : And back to memory
+ ret $31,($26),1 # L0 :
+
+within_one_quad:
+ ldq_u $1,0($16) # L :
+ insql $17,$16,$2 # U : New bits
+ mskql $1,$16,$4 # U : Clear old
+ bis $2,$4,$2 # E : New result
+
+ mskql $2,$6,$4 # U :
+ mskqh $1,$6,$2 # U :
+ bis $2,$4,$1 # E :
+ stq_u $1,0($16) # L :
+
+end:
+ nop
+ nop
+ nop
+ ret $31,($26),1 # L0 :
+ .end __constant_c_memset
+
+ /*
+ * This is a replicant of the __constant_c_memset code, rescheduled
+ * to mask stalls. Note that entry point names also had to change
+ */
+ .align 5
+ .ent __memsetw
+
+__memsetw:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ inswl $17,0,$5 # U : 000000000000c1c2
+ inswl $17,2,$2 # U : 00000000c1c20000
+ bis $16,$16,$0 # E : return value
+ addq $18,$16,$6 # E : max address to write to
+
+ ble $18, end_w # U : zero length requested?
+ inswl $17,4,$3 # U : 0000c1c200000000
+ inswl $17,6,$4 # U : c1c2000000000000
+ xor $16,$6,$1 # E : will complete write be within one quadword?
+
+ or $2,$5,$2 # E : 00000000c1c2c1c2
+ or $3,$4,$17 # E : c1c2c1c200000000
+ bic $1,7,$1 # E : fit within a single quadword
+ and $16,7,$3 # E : Target addr misalignment
+
+ or $17,$2,$17 # E : c1c2c1c2c1c2c1c2
+ beq $1,within_quad_w # U :
+ nop
+ beq $3,aligned_w # U : target is 0mod8
+
+ /*
+ * Target address is misaligned, and won't fit within a quadword
+ */
+ ldq_u $4,0($16) # L : Fetch first partial
+ bis $16,$16,$5 # E : Save the address
+ insql $17,$16,$2 # U : Insert new bytes
+ subq $3,8,$3 # E : Invert (for addressing uses)
+
+ addq $18,$3,$18 # E : $18 is new count ($3 is negative)
+ mskql $4,$16,$4 # U : clear relevant parts of the quad
+ subq $16,$3,$16 # E : $16 is new aligned destination
+ bis $2,$4,$1 # E : Final bytes
+
+ nop
+ stq_u $1,0($5) # L : Store result
+ nop
+ nop
+
+.align 4
+aligned_w:
+ /*
+ * We are now guaranteed to be quad aligned, with at least
+ * one partial quad to write.
+ */
+
+ sra $18,3,$3 # U : Number of remaining quads to write
+ and $18,7,$18 # E : Number of trailing bytes to write
+ bis $16,$16,$5 # E : Save dest address
+ beq $3,no_quad_w # U : tail stuff only
+
+ /*
+ * it's worth the effort to unroll this and use wh64 if possible
+ * Lifted a bunch of code from clear_user.S
+ * At this point, entry values are:
+ * $16 Current destination address
+ * $5 A copy of $16
+ * $6 The max quadword address to write to
+ * $18 Number trailer bytes
+ * $3 Number quads to write
+ */
+
+ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop)
+ subq $3, 16, $4 # E : Only try to unroll if > 128 bytes
+ subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64)
+ blt $4, loop_w # U :
+
+ /*
+ * We know we've got at least 16 quads, minimum of one trip
+ * through unrolled loop. Do a quad at a time to get us 0mod64
+ * aligned.
+ */
+
+ nop # E :
+ nop # E :
+ nop # E :
+ beq $1, $bigalign_w # U :
+
+$alignmod64_w:
+ stq $17, 0($5) # L :
+ subq $3, 1, $3 # E : For consistency later
+ addq $1, 8, $1 # E : Increment towards zero for alignment
+ addq $5, 8, $4 # E : Initial wh64 address (filler instruction)
+
+ nop
+ nop
+ addq $5, 8, $5 # E : Inc address
+ blt $1, $alignmod64_w # U :
+
+$bigalign_w:
+ /*
+ * $3 - number quads left to go
+ * $5 - target address (aligned 0mod64)
+ * $17 - mask of stuff to store
+ * Scratch registers available: $7, $2, $4, $1
+ * we know that we'll be taking a minimum of one trip through the loop.
+ * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
+ * Assumes the wh64 should target the trip two iterations ahead.
+ * The wh64 is issued for the starting destination address of trip +2
+ * through the loop; if fewer than two trips remain, the target
+ * address falls back to the current trip.
+ */
+
+$do_wh64_w:
+ wh64 ($4) # L1 : memory subsystem write hint
+ subq $3, 24, $2 # E : For determining future wh64 addresses
+ stq $17, 0($5) # L :
+ nop # E :
+
+ addq $5, 128, $4 # E : speculative target of next wh64
+ stq $17, 8($5) # L :
+ stq $17, 16($5) # L :
+ addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr)
+
+ stq $17, 24($5) # L :
+ stq $17, 32($5) # L :
+ cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle
+ nop
+
+ stq $17, 40($5) # L :
+ stq $17, 48($5) # L :
+ subq $3, 16, $2 # E : Repeat the loop at least once more?
+ nop
+
+ stq $17, 56($5) # L :
+ addq $5, 64, $5 # E :
+ subq $3, 8, $3 # E :
+ bge $2, $do_wh64_w # U :
+
+ nop
+ nop
+ nop
+ beq $3, no_quad_w # U : Might have finished already
+
+.align 4
+ /*
+ * Simple loop for trailing quadwords, or for small amounts
+ * of data (where we can't use an unrolled loop and wh64)
+ */
+loop_w:
+ stq $17,0($5) # L :
+ subq $3,1,$3 # E : Decrement number quads left
+ addq $5,8,$5 # E : Inc address
+ bne $3,loop_w # U : more?
+
+no_quad_w:
+ /*
+ * Write 0..7 trailing bytes.
+ */
+ nop # E :
+ beq $18,end_w # U : All done?
+ ldq $7,0($5) # L :
+ mskqh $7,$6,$2 # U : Mask final quad
+
+ insqh $17,$6,$4 # U : New bits
+ bis $2,$4,$1 # E : Put it all together
+ stq $1,0($5) # L : And back to memory
+ ret $31,($26),1 # L0 :
+
+within_quad_w:
+ ldq_u $1,0($16) # L :
+ insql $17,$16,$2 # U : New bits
+ mskql $1,$16,$4 # U : Clear old
+ bis $2,$4,$2 # E : New result
+
+ mskql $2,$6,$4 # U :
+ mskqh $1,$6,$2 # U :
+ bis $2,$4,$1 # E :
+ stq_u $1,0($16) # L :
+
+end_w:
+ nop
+ nop
+ nop
+ ret $31,($26),1 # L0 :
+
+ .end __memsetw
+
+memset = __memset
diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S
new file mode 100644
index 000000000000..d2e28178cacc
--- /dev/null
+++ b/arch/alpha/lib/ev6-strncpy_from_user.S
@@ -0,0 +1,424 @@
+/*
+ * arch/alpha/lib/ev6-strncpy_from_user.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Just like strncpy except in the return value:
+ *
+ * -EFAULT if an exception occurs before the terminator is copied.
+ * N if the buffer was filled.
+ *
+ * Otherwise the length of the string is returned.
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * A bunch of instructions got moved and temp registers were changed
+ * to aid in scheduling. Control flow was also re-arranged to eliminate
+ * branches, and to provide longer code sequences to enable better scheduling.
+ * A total rewrite (using byte load/stores for start & tail sequences)
+ * is desirable, but very difficult to do without a from-scratch rewrite.
+ * Save that for the future.
+ */
+
+
+#include <asm/errno.h>
+#include <asm/regdef.h>
+
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EX(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exception-99b($0); \
+ .previous
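+
+/*
+ * How the macro above works: each __ex_table entry pairs the offset
+ * of the faulting insn with a fixup encoded as an lda -- the
+ * displacement is the branch target ($exception) relative to the
+ * insn, and the base register ($0) names the register that receives
+ * -EFAULT before execution resumes there.
+ */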
+
+
+ .set noat
+ .set noreorder
+ .text
+
+ .globl __strncpy_from_user
+ .ent __strncpy_from_user
+ .frame $30, 0, $26
+ .prologue 0
+
+ .align 4
+__strncpy_from_user:
+ and a0, 7, t3 # E : find dest misalignment
+ beq a2, $zerolength # U :
+
+ /* Are source and destination co-aligned? */
+ mov a0, v0 # E : save the string start
+ xor a0, a1, t4 # E :
+ EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword
+ ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword
+
+ addq a2, t3, a2 # E : bias count by dest misalignment
+ subq a2, 1, a3 # E :
+ addq zero, 1, t10 # E :
+ and t4, 7, t4 # E : misalignment between the two
+
+ and a3, 7, t6 # E : number of tail bytes
+ sll t10, t6, t10 # E : t10 = bitmask of last count byte
+ bne t4, $unaligned # U :
+ lda t2, -1 # E : build a mask against false zero
+
+ /*
+ * We are co-aligned; take care of a partial first word.
+ * On entry to this basic block:
+ * t0 == the first destination word for masking back in
+ * t1 == the first source word.
+ */
+
+ srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8
+ addq a1, 8, a1 # E :
+ mskqh t2, a1, t2 # U : detection in the src word
+ nop
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ mskqh t1, a1, t3 # U :
+ mskql t0, a1, t0 # U : assemble the first output word
+ ornot t1, t2, t2 # E :
+ nop
+
+ cmpbge zero, t2, t8 # E : bits set iff null found
+ or t0, t3, t0 # E :
+ beq a2, $a_eoc # U :
+ bne t8, $a_eos # U : 2nd branch in a quad. Bad.
+
+ /* On entry to this basic block:
+ * t0 == a source quad not containing a null.
+ * a0 - current aligned destination address
+ * a1 - current aligned source address
+ * a2 - count of quadwords to move.
+ * NOTE: Loop improvement - unrolling this is going to be
+ * a huge win, since we're going to stall otherwise.
+ * Fix this later. For _really_ large copies, look
+ * at using wh64 on a look-ahead basis. See the code
+ * in clear_user.S and copy_user.S.
+ * Presumably, since (a0) and (a1) do not overlap (by C definition),
+ * loads can safely be scheduled ahead of the stores.
+ * Lots of nops here:
+ * - Separate loads from stores
+ * - Keep it to 1 branch/quadpack so the branch predictor
+ * can train.
+ */
+$a_loop:
+ stq_u t0, 0(a0) # L :
+ addq a0, 8, a0 # E :
+ nop
+ subq a2, 1, a2 # E :
+
+ EX( ldq_u t0, 0(a1) ) # L :
+ addq a1, 8, a1 # E :
+ cmpbge zero, t0, t8 # E : Stall 2 cycles on t0
+ beq a2, $a_eoc # U :
+
+ beq t8, $a_loop # U :
+ nop
+ nop
+ nop
+
+ /* Take care of the final (partial) word store. At this point
+ * the end-of-count bit is set in t8 iff it applies.
+ *
+ * On entry to this basic block we have:
+ * t0 == the source word containing the null
+ * t8 == the cmpbge mask that found it.
+ */
+$a_eos:
+ negq t8, t12 # E : find low bit set
+ and t8, t12, t12 # E :
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t1, 0(a0) # L :
+ subq t12, 1, t6 # E :
+
+ or t12, t6, t8 # E :
+ zapnot t0, t8, t0 # U : clear src bytes > null
+ zap t1, t8, t1 # U : clear dst bytes <= null
+ or t0, t1, t0 # E :
+
+ stq_u t0, 0(a0) # L :
+ br $finish_up # L0 :
+ nop
+ nop
+
+ /* Add the end-of-count bit to the eos detection bitmask. */
+ .align 4
+$a_eoc:
+ or t10, t8, t8
+ br $a_eos
+ nop
+ nop
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 4
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, unmasked
+ t1 == the shifted low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ EX( ldq_u t2, 8(a1) ) # L : load second src word
+ addq a1, 8, a1 # E :
+ mskql t0, a0, t0 # U : mask trailing garbage in dst
+ extqh t2, a1, t4 # U :
+
+ or t1, t4, t1 # E : first aligned src word complete
+ mskqh t1, a0, t1 # U : mask leading garbage in src
+ or t0, t1, t0 # E : first output word complete
+ or t0, t6, t6 # E : mask original data for zero test
+
+ cmpbge zero, t6, t8 # E :
+ beq a2, $u_eocfin # U :
+ bne t8, $u_final # U : bad news - 2nd branch in a quad
+ lda t6, -1 # E : mask out the bits we have
+
+ mskql t6, a1, t6 # U : already seen
+ stq_u t0, 0(a0) # L : store first output word
+ or t6, t2, t2 # E :
+ cmpbge zero, t2, t8 # E : find nulls in second partial
+
+ addq a0, 8, a0 # E :
+ subq a2, 1, a2 # E :
+ bne t8, $u_late_head_exit # U :
+ nop
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+
+ extql t2, a1, t1 # U : position hi-bits of lo word
+ EX( ldq_u t2, 8(a1) ) # L : read next high-order source word
+ addq a1, 8, a1 # E :
+ cmpbge zero, t2, t8 # E :
+
+ beq a2, $u_eoc # U :
+ bne t8, $u_eos # U :
+ nop
+ nop
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ /*
+ * Extra nops here:
+ * separate load quads from store quads
+ * only one branch/quad to permit predictor training
+ */
+
+ .align 4
+$u_loop:
+ extqh t2, a1, t0 # U : extract high bits for current word
+ addq a1, 8, a1 # E :
+ extql t2, a1, t3 # U : extract low bits for next time
+ addq a0, 8, a0 # E :
+
+ or t0, t1, t0 # E : current dst word now complete
+ EX( ldq_u t2, 0(a1) ) # L : load high word for next time
+ subq a2, 1, a2 # E :
+ nop
+
+ stq_u t0, -8(a0) # L : save the current word
+ mov t3, t1 # E :
+ cmpbge zero, t2, t8 # E : test new word for eos
+ beq a2, $u_eoc # U :
+
+ beq t8, $u_loop # U :
+ nop
+ nop
+ nop
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+ .align 4
+$u_eos:
+ extqh t2, a1, t0 # U :
+ or t0, t1, t0 # E : first (partial) source word complete
+ cmpbge zero, t0, t8 # E : is the null in this first bit?
+ nop
+
+ bne t8, $u_final # U :
+ stq_u t0, 0(a0) # L : the null was in the high-order bits
+ addq a0, 8, a0 # E :
+ subq a2, 1, a2 # E :
+
+ .align 4
+$u_late_head_exit:
+ extql t2, a1, t0 # U :
+ cmpbge zero, t0, t8 # E :
+ or t8, t10, t6 # E :
+ cmoveq a2, t6, t8 # E :
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t0 == assembled source word
+ t8 == cmpbge mask that found the null. */
+ .align 4
+$u_final:
+ negq t8, t6 # E : isolate low bit set
+ and t6, t8, t12 # E :
+ ldq_u t1, 0(a0) # L :
+ subq t12, 1, t6 # E :
+
+ or t6, t12, t8 # E :
+ zapnot t0, t8, t0 # U : kill source bytes > null
+ zap t1, t8, t1 # U : kill dest bytes <= null
+ or t0, t1, t0 # E :
+
+ stq_u t0, 0(a0) # E :
+ br $finish_up # U :
+ nop
+ nop
+
+ .align 4
+$u_eoc: # end-of-count
+ extqh t2, a1, t0 # U :
+ or t0, t1, t0 # E :
+ cmpbge zero, t0, t8 # E :
+ nop
+
+ .align 4
+$u_eocfin: # end-of-count, final word
+ or t10, t8, t8 # E :
+ br $u_final # U :
+ nop
+ nop
+
+ /* Unaligned copy entry point. */
+ .align 4
+$unaligned:
+
+ srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8
+ and a0, 7, t4 # E : find dest misalignment
+ and a1, 7, t5 # E : find src misalignment
+ mov zero, t0 # E :
+
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+
+ mov zero, t6 # E :
+ beq t4, 1f # U :
+ ldq_u t0, 0(a0) # L :
+ lda t6, -1 # E :
+
+ mskql t6, a0, t6 # E :
+ nop
+ nop
+ nop
+
+ .align 4
+1:
+ subq a1, t4, a1 # E : sub dest misalignment from src addr
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+ cmplt t4, t5, t12 # E :
+ extql t1, a1, t1 # U : shift src into place
+ lda t2, -1 # E : for creating masks later
+
+ beq t12, $u_head # U :
+ mskqh t2, t5, t2 # U : begin src byte validity mask
+ cmpbge zero, t1, t8 # E : is there a zero?
+ nop
+
+ extql t2, a1, t2 # U :
+ or t8, t10, t5 # E : test for end-of-count too
+ cmpbge zero, t2, t3 # E :
+ cmoveq a2, t5, t8 # E : Latency=2, extra map slot
+
+ nop # E : goes with cmov
+ andnot t8, t3, t8 # E :
+ beq t8, $u_head # U :
+ nop
+
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+
+ ldq_u t0, 0(a0) # L :
+ negq t8, t6 # E : build bitmask of bytes <= zero
+ mskqh t1, t4, t1 # U :
+ and t6, t8, t12 # E :
+
+ subq t12, 1, t6 # E :
+ or t6, t12, t8 # E :
+ zapnot t2, t8, t2 # U : prepare source word; mirror changes
+ zapnot t1, t8, t1 # U : to source validity mask
+
+ andnot t0, t2, t0 # E : zero place for source to reside
+ or t0, t1, t0 # E : and put it there
+ stq_u t0, 0(a0) # L :
+ nop
+
+ .align 4
+$finish_up:
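+ /*
+ * t12 has exactly one bit set, marking the last byte written.
+ * The and/cmovne pairs below binary-search that bit position:
+ * 0xf0 contributes 4, 0xcc contributes 2, 0xaa contributes 1,
+ * yielding the byte index that is added to the aligned base.
+ */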
+ zapnot t0, t12, t4 # U : was last byte written null?
+ and t12, 0xf0, t3 # E : binary search for the address of the
+ cmovne t4, 1, t4 # E : Latency=2, extra map slot
+ nop # E : with cmovne
+
+ and t12, 0xcc, t2 # E : last byte written
+ and t12, 0xaa, t1 # E :
+ cmovne t3, 4, t3 # E : Latency=2, extra map slot
+ nop # E : with cmovne
+
+ bic a0, 7, t0 # E : aligned address of last word written
+ cmovne t2, 2, t2 # E : Latency=2, extra map slot
+ nop # E : with cmovne
+ nop
+
+ cmovne t1, 1, t1 # E : Latency=2, extra map slot
+ nop # E : with cmovne
+ addq t0, t3, t0 # E :
+ addq t1, t2, t1 # E :
+
+ addq t0, t1, t0 # E :
+ addq t0, t4, t0 # add one if we filled the buffer
+ subq t0, v0, v0 # find string length
+ ret # L0 :
+
+ .align 4
+$zerolength:
+ nop
+ nop
+ nop
+ clr v0
+
+$exception:
+ nop
+ nop
+ nop
+ ret
+
+ .end __strncpy_from_user
diff --git a/arch/alpha/lib/ev6-stxcpy.S b/arch/alpha/lib/ev6-stxcpy.S
new file mode 100644
index 000000000000..4643ff2ffc8d
--- /dev/null
+++ b/arch/alpha/lib/ev6-stxcpy.S
@@ -0,0 +1,321 @@
+/*
+ * arch/alpha/lib/ev6-stxcpy.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Copy a null-terminated string from SRC to DST.
+ *
+ * This is an internal routine used by strcpy, stpcpy, and strcat.
+ * As such, it uses special linkage conventions to make implementation
+ * of these public functions more efficient.
+ *
+ * On input:
+ * t9 = return address
+ * a0 = DST
+ * a1 = SRC
+ *
+ * On output:
+ * t12 = bitmask (with one bit set) indicating the last byte written
+ * a0 = unaligned address of the last *word* written
+ *
+ * Furthermore, v0, a3-a5, t11, and t12 are untouched.
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ */
+
+#include <asm/regdef.h>
+
+ .set noat
+ .set noreorder
+
+ .text
+
+/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
+ doesn't like putting the entry point for a procedure somewhere in the
+ middle of the procedure descriptor. Work around this by putting the
+ aligned copy in its own procedure descriptor */
+
+
+ .ent stxcpy_aligned
+ .align 4
+stxcpy_aligned:
+ .frame sp, 0, t9
+ .prologue 0
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == the first source word. */
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ lda t2, -1 # E : build a mask against false zero
+ mskqh t2, a1, t2 # U : detection in the src word (stall)
+ mskqh t1, a1, t3 # U :
+ ornot t1, t2, t2 # E : (stall)
+
+ mskql t0, a1, t0 # U : assemble the first output word
+ cmpbge zero, t2, t8 # E : bits set iff null found
+ or t0, t3, t1 # E : (stall)
+ bne t8, $a_eos # U : (stall)
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == a source word not containing a null. */
+ /* Nops here to separate store quads from load quads */
+
+$a_loop:
+ stq_u t1, 0(a0) # L :
+ addq a0, 8, a0 # E :
+ nop
+ nop
+
+ ldq_u t1, 0(a1) # L : Latency=3
+ addq a1, 8, a1 # E :
+ cmpbge zero, t1, t8 # E : (3 cycle stall)
+ beq t8, $a_loop # U : (stall for t8)
+
+ /* Take care of the final (partial) word store.
+ On entry to this basic block we have:
+ t1 == the source word containing the null
+ t8 == the cmpbge mask that found it. */
+$a_eos:
+ negq t8, t6 # E : find low bit set
+ and t8, t6, t12 # E : (stall)
+ /* For the sake of the cache, don't read a destination word
+ if we're not going to need it. */
+ and t12, 0x80, t6 # E : (stall)
+ bne t6, 1f # U : (stall)
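+ /* t12 == 0x80 means the null is in byte 7, so the whole source
+ quad is stored as-is and no dest merge is needed. */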
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t0, 0(a0) # L : Latency=3
+ subq t12, 1, t6 # E :
+ zapnot t1, t6, t1 # U : clear src bytes >= null (stall)
+ or t12, t6, t8 # E : (stall)
+
+ zap t0, t8, t0 # E : clear dst bytes <= null
+ or t0, t1, t1 # E : (stall)
+ nop
+ nop
+
+1: stq_u t1, 0(a0) # L :
+ ret (t9) # L0 : Latency=3
+ nop
+ nop
+
+ .end stxcpy_aligned
+
+ .align 4
+ .ent __stxcpy
+ .globl __stxcpy
+__stxcpy:
+ .frame sp, 0, t9
+ .prologue 0
+
+ /* Are source and destination co-aligned? */
+ xor a0, a1, t0 # E :
+ unop # E :
+ and t0, 7, t0 # E : (stall)
+ bne t0, $unaligned # U : (stall)
+
+ /* We are co-aligned; take care of a partial first word. */
+ ldq_u t1, 0(a1) # L : load first src word
+ and a0, 7, t0 # E : take care not to load a word ...
+ addq a1, 8, a1 # E :
+ beq t0, stxcpy_aligned # U : ... if we won't need it (stall)
+
+ ldq_u t0, 0(a0) # L :
+ br stxcpy_aligned # L0 : Latency=3
+ nop
+ nop
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 4
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, for masking back in, if needed else 0
+ t1 == the low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ ldq_u t2, 8(a1) # L :
+ addq a1, 8, a1 # E :
+ extql t1, a1, t1 # U : (stall on a1)
+ extqh t2, a1, t4 # U : (stall on a1)
+
+ mskql t0, a0, t0 # U :
+ or t1, t4, t1 # E :
+ mskqh t1, a0, t1 # U : (stall on t1)
+ or t0, t1, t1 # E : (stall on t1)
+
+ or t1, t6, t6 # E :
+ cmpbge zero, t6, t8 # E : (stall)
+ lda t6, -1 # E : for masking just below
+ bne t8, $u_final # U : (stall)
+
+ mskql t6, a1, t6 # U : mask out the bits we have
+ or t6, t2, t2 # E : already extracted before (stall)
+ cmpbge zero, t2, t8 # E : testing eos (stall)
+ bne t8, $u_late_head_exit # U : (stall)
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+
+ stq_u t1, 0(a0) # L : store first output word
+ addq a0, 8, a0 # E :
+ extql t2, a1, t0 # U : position hi-bits of lo word
+ ldq_u t2, 8(a1) # U : read next high-order source word
+
+ addq a1, 8, a1 # E :
+ cmpbge zero, t2, t8 # E : (stall for t2)
+ nop # E :
+ bne t8, $u_eos # U : (stall)
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t0 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ .align 3
+$u_loop:
+ extqh t2, a1, t1 # U : extract high bits for current word
+ addq a1, 8, a1 # E : (stall)
+ extql t2, a1, t3 # U : extract low bits for next time (stall)
+ addq a0, 8, a0 # E :
+
+ or t0, t1, t1 # E : current dst word now complete
+ ldq_u t2, 0(a1) # L : Latency=3 load high word for next time
+ stq_u t1, -8(a0) # L : save the current word (stall)
+ mov t3, t0 # E :
+
+ cmpbge zero, t2, t8 # E : test new word for eos
+ beq t8, $u_loop # U : (stall)
+ nop
+ nop
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t0 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+$u_eos:
+ extqh t2, a1, t1 # U :
+ or t0, t1, t1 # E : first (partial) source word complete (stall)
+ cmpbge zero, t1, t8 # E : is the null in this first bit? (stall)
+ bne t8, $u_final # U : (stall)
+
+$u_late_head_exit:
+ stq_u t1, 0(a0) # L : the null was in the high-order bits
+ addq a0, 8, a0 # E :
+ extql t2, a1, t1 # U :
+ cmpbge zero, t1, t8 # E : (stall)
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t1 == assembled source word
+ t8 == cmpbge mask that found the null. */
+$u_final:
+ negq t8, t6 # E : isolate low bit set
+ and t6, t8, t12 # E : (stall)
+ and t12, 0x80, t6 # E : avoid dest word load if we can (stall)
+ bne t6, 1f # U : (stall)
+
+ ldq_u t0, 0(a0) # E :
+ subq t12, 1, t6 # E :
+ or t6, t12, t8 # E : (stall)
+ zapnot t1, t6, t1 # U : kill source bytes >= null (stall)
+
+ zap t0, t8, t0 # U : kill dest bytes <= null (2 cycle data stall)
+ or t0, t1, t1 # E : (stall)
+ nop
+ nop
+
+1: stq_u t1, 0(a0) # L :
+ ret (t9) # L0 : Latency=3
+ nop
+ nop
+
+ /* Unaligned copy entry point. */
+ .align 4
+$unaligned:
+
+ ldq_u t1, 0(a1) # L : load first source word
+ and a0, 7, t4 # E : find dest misalignment
+ and a1, 7, t5 # E : find src misalignment
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+ mov zero, t0 # E :
+
+ mov zero, t6 # E :
+ beq t4, 1f # U :
+ ldq_u t0, 0(a0) # L :
+ lda t6, -1 # E :
+
+ mskql t6, a0, t6 # U :
+ nop
+ nop
+ nop
+1:
+ subq a1, t4, a1 # E : sub dest misalignment from src addr
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+ cmplt t4, t5, t12 # E :
+ beq t12, $u_head # U :
+ lda t2, -1 # E : mask out leading garbage in source
+
+ mskqh t2, t5, t2 # U :
+ ornot t1, t2, t3 # E : (stall)
+ cmpbge zero, t3, t8 # E : is there a zero? (stall)
+ beq t8, $u_head # U : (stall)
+
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+
+ ldq_u t0, 0(a0) # L :
+ negq t8, t6 # E : build bitmask of bytes <= zero
+ and t6, t8, t12 # E : (stall)
+ and a1, 7, t5 # E :
+
+ subq t12, 1, t6 # E :
+ or t6, t12, t8 # E : (stall)
+ srl t12, t5, t12 # U : adjust final null return value
+ zapnot t2, t8, t2 # U : prepare source word; mirror changes (stall)
+
+ and t1, t2, t1 # E : to source validity mask
+ extql t2, a1, t2 # U :
+ extql t1, a1, t1 # U : (stall)
+ andnot t0, t2, t0 # .. e1 : zero place for source to reside (stall)
+
+ or t0, t1, t1 # e1 : and put it there
+ stq_u t1, 0(a0) # .. e0 : (stall)
+ ret (t9) # e1 :
+ nop
+
+ .end __stxcpy
+
diff --git a/arch/alpha/lib/ev6-stxncpy.S b/arch/alpha/lib/ev6-stxncpy.S
new file mode 100644
index 000000000000..b581a7af2456
--- /dev/null
+++ b/arch/alpha/lib/ev6-stxncpy.S
@@ -0,0 +1,397 @@
+/*
+ * arch/alpha/lib/ev6-stxncpy.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
+ *
+ * Copy no more than COUNT bytes of the null-terminated string from
+ * SRC to DST.
+ *
+ * This is an internal routine used by strncpy, stpncpy, and strncat.
+ * As such, it uses special linkage conventions to make implementation
+ * of these public functions more efficient.
+ *
+ * On input:
+ * t9 = return address
+ * a0 = DST
+ * a1 = SRC
+ * a2 = COUNT
+ *
+ * Furthermore, COUNT may not be zero.
+ *
+ * On output:
+ * t0 = last word written
+ * t10 = bitmask (with one bit set) indicating the byte position of
+ * the end of the range specified by COUNT
+ * t12 = bitmask (with one bit set) indicating the last byte written
+ * a0 = unaligned address of the last *word* written
+ * a2 = the number of full words left in COUNT
+ *
+ * Furthermore, v0, a3-a5, t11, and $at are untouched.
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ */
+
+#include <asm/regdef.h>
+
+ .set noat
+ .set noreorder
+
+ .text
+
+/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
+ doesn't like putting the entry point for a procedure somewhere in the
+ middle of the procedure descriptor. Work around this by putting the
+ aligned copy in its own procedure descriptor */
+
+
+ .ent stxncpy_aligned
+ .align 4
+stxncpy_aligned:
+ .frame sp, 0, t9, 0
+ .prologue 0
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == the first source word. */
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ lda t2, -1 # E : build a mask against false zero
+ mskqh t2, a1, t2 # U : detection in the src word (stall)
+ mskqh t1, a1, t3 # U :
+ ornot t1, t2, t2 # E : (stall)
+
+ mskql t0, a1, t0 # U : assemble the first output word
+ cmpbge zero, t2, t8 # E : bits set iff null found
+ or t0, t3, t0 # E : (stall)
+ beq a2, $a_eoc # U :
+
+ bne t8, $a_eos # U :
+ nop
+ nop
+ nop
+
+ /* On entry to this basic block:
+ t0 == a source word not containing a null. */
+
+ /*
+ * nops here to:
+ * separate store quads from load quads
+ * limit of 1 bcond/quad to permit training
+ */
+$a_loop:
+ stq_u t0, 0(a0) # L :
+ addq a0, 8, a0 # E :
+ subq a2, 1, a2 # E :
+ nop
+
+ ldq_u t0, 0(a1) # L :
+ addq a1, 8, a1 # E :
+ cmpbge zero, t0, t8 # E :
+ beq a2, $a_eoc # U :
+
+ beq t8, $a_loop # U :
+ nop
+ nop
+ nop
+
+ /* Take care of the final (partial) word store. At this point
+ the end-of-count bit is set in t8 iff it applies.
+
+ On entry to this basic block we have:
+ t0 == the source word containing the null
+ t8 == the cmpbge mask that found it. */
+
+$a_eos:
+ negq t8, t12 # E : find low bit set
+ and t8, t12, t12 # E : (stall)
+ /* For the sake of the cache, don't read a destination word
+ if we're not going to need it. */
+ and t12, 0x80, t6 # E : (stall)
+ bne t6, 1f # U : (stall)
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t1, 0(a0) # L :
+ subq t12, 1, t6 # E :
+ or t12, t6, t8 # E : (stall)
+ zapnot t0, t8, t0 # U : clear src bytes > null (stall)
+
+ zap t1, t8, t1 # .. e1 : clear dst bytes <= null
+ or t0, t1, t0 # e1 : (stall)
+ nop
+ nop
+
+1: stq_u t0, 0(a0) # L :
+ ret (t9) # L0 : Latency=3
+ nop
+ nop
+
+ /* Add the end-of-count bit to the eos detection bitmask. */
+$a_eoc:
+ or t10, t8, t8 # E :
+ br $a_eos # L0 : Latency=3
+ nop
+ nop
+
+ .end stxncpy_aligned
+
+ .align 4
+ .ent __stxncpy
+ .globl __stxncpy
+__stxncpy:
+ .frame sp, 0, t9, 0
+ .prologue 0
+
+ /* Are source and destination co-aligned? */
+ xor a0, a1, t1 # E :
+ and a0, 7, t0 # E : find dest misalignment
+ and t1, 7, t1 # E : (stall)
+ addq a2, t0, a2 # E : bias count by dest misalignment (stall)
+
+ subq a2, 1, a2 # E :
+ and a2, 7, t2 # E : (stall)
+ srl a2, 3, a2 # U : a2 = loop counter = (count - 1)/8 (stall)
+ addq zero, 1, t10 # E :
+
+ sll t10, t2, t10 # U : t10 = bitmask of last count byte
+ bne t1, $unaligned # U :
+ /* We are co-aligned; take care of a partial first word. */
+ ldq_u t1, 0(a1) # L : load first src word
+ addq a1, 8, a1 # E :
+
+ beq t0, stxncpy_aligned # U : avoid loading dest word if not needed
+ ldq_u t0, 0(a0) # L :
+ nop
+ nop
+
+ br stxncpy_aligned # .. e1 :
+ nop
+ nop
+ nop
+
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 4
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, unmasked
+ t1 == the shifted low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ ldq_u t2, 8(a1) # L : Latency=3 load second src word
+ addq a1, 8, a1 # E :
+ mskql t0, a0, t0 # U : mask trailing garbage in dst
+ extqh t2, a1, t4 # U : (3 cycle stall on t2)
+
+ or t1, t4, t1 # E : first aligned src word complete (stall)
+ mskqh t1, a0, t1 # U : mask leading garbage in src (stall)
+ or t0, t1, t0 # E : first output word complete (stall)
+ or t0, t6, t6 # E : mask original data for zero test (stall)
+
+ cmpbge zero, t6, t8 # E :
+ beq a2, $u_eocfin # U :
+ lda t6, -1 # E :
+ nop
+
+ bne t8, $u_final # U :
+ mskql t6, a1, t6 # U : mask out bits already seen
+ stq_u t0, 0(a0) # L : store first output word
+ or t6, t2, t2 # E : (stall)
+
+ cmpbge zero, t2, t8 # E : find nulls in second partial
+ addq a0, 8, a0 # E :
+ subq a2, 1, a2 # E :
+ bne t8, $u_late_head_exit # U :
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+ extql t2, a1, t1 # U : position hi-bits of lo word
+ beq a2, $u_eoc # U :
+ ldq_u t2, 8(a1) # L : read next high-order source word
+ addq a1, 8, a1 # E :
+
+ extqh t2, a1, t0 # U : position lo-bits of hi word (stall)
+ cmpbge zero, t2, t8 # E :
+ nop
+ bne t8, $u_eos # U :
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t0 == the shifted low-order bits from the current source word
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ .align 4
+$u_loop:
+ or t0, t1, t0 # E : current dst word now complete
+ subq a2, 1, a2 # E : decrement word count
+ extql t2, a1, t1 # U : extract low bits for next time
+ addq a0, 8, a0 # E :
+
+ stq_u t0, -8(a0) # U : save the current word
+ beq a2, $u_eoc # U :
+ ldq_u t2, 8(a1) # U : Latency=3 load high word for next time
+ addq a1, 8, a1 # E :
+
+ extqh t2, a1, t0 # U : extract low bits (2 cycle stall)
+ cmpbge zero, t2, t8 # E : test new word for eos
+ nop
+ beq t8, $u_loop # U :
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t0 == the shifted low-order bits from the current source word
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+$u_eos:
+ or t0, t1, t0 # E : first (partial) source word complete
+ nop
+ cmpbge zero, t0, t8 # E : is the null in this first bit? (stall)
+ bne t8, $u_final # U : (stall)
+
+ stq_u t0, 0(a0) # L : the null was in the high-order bits
+ addq a0, 8, a0 # E :
+ subq a2, 1, a2 # E :
+ nop
+
+$u_late_head_exit:
+ extql t2, a1, t0 # U :
+ cmpbge zero, t0, t8 # E :
+ or t8, t10, t6 # E : (stall)
+ cmoveq a2, t6, t8 # E : Latency=2, extra map slot (stall)
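+ /* When the word count has run out (a2 == 0) the end-of-count bit
+ is folded into the null mask, so $u_final stops at the count
+ limit just as it would at a terminator. */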
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t0 == assembled source word
+ t8 == cmpbge mask that found the null. */
+$u_final:
+ negq t8, t6 # E : isolate low bit set
+ and t6, t8, t12 # E : (stall)
+ and t12, 0x80, t6 # E : avoid dest word load if we can (stall)
+ bne t6, 1f # U : (stall)
+
+ ldq_u t1, 0(a0) # L :
+ subq t12, 1, t6 # E :
+ or t6, t12, t8 # E : (stall)
+ zapnot t0, t8, t0 # U : kill source bytes > null
+
+ zap t1, t8, t1 # U : kill dest bytes <= null
+ or t0, t1, t0 # E : (stall)
+ nop
+ nop
+
+1: stq_u t0, 0(a0) # L :
+ ret (t9) # L0 : Latency=3
+
+ /* Got to end-of-count before end of string.
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word */
+$u_eoc:
+ and a1, 7, t6 # E : avoid final load if possible
+ sll t10, t6, t6 # U : (stall)
+ and t6, 0xff, t6 # E : (stall)
+ bne t6, 1f # U : (stall)
+
+ ldq_u t2, 8(a1) # L : load final src word
+ nop
+ extqh t2, a1, t0 # U : extract low bits for last word (stall)
+ or t1, t0, t1 # E : (stall)
+
+1: cmpbge zero, t1, t8 # E :
+ mov t1, t0 # E :
+
+$u_eocfin: # end-of-count, final word
+ or t10, t8, t8 # E :
+ br $u_final # L0 : Latency=3
+
+ /* Unaligned copy entry point. */
+ .align 4
+$unaligned:
+
+ ldq_u t1, 0(a1) # L : load first source word
+ and a0, 7, t4 # E : find dest misalignment
+ and a1, 7, t5 # E : find src misalignment
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+ mov zero, t0 # E :
+
+ mov zero, t6 # E :
+ beq t4, 1f # U :
+ ldq_u t0, 0(a0) # L :
+ lda t6, -1 # E :
+
+ mskql t6, a0, t6 # U :
+ nop
+ nop
+ subq a1, t4, a1 # E : sub dest misalignment from src addr
+
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+
+1: cmplt t4, t5, t12 # E :
+ extql t1, a1, t1 # U : shift src into place
+ lda t2, -1 # E : for creating masks later
+ beq t12, $u_head # U : (stall)
+
+ extql t2, a1, t2 # U :
+ cmpbge zero, t1, t8 # E : is there a zero?
+ andnot t2, t6, t12 # E : dest mask for a single word copy
+ or t8, t10, t5 # E : test for end-of-count too
+
+ cmpbge zero, t12, t3 # E :
+ cmoveq a2, t5, t8 # E : Latency=2, extra map slot
+ nop # E : keep with cmoveq
+ andnot t8, t3, t8 # E : (stall)
+
+ beq t8, $u_head # U :
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+ ldq_u t0, 0(a0) # L :
+ negq t8, t6 # E : build bitmask of bytes <= zero
+ mskqh t1, t4, t1 # U :
+
+ and t6, t8, t2 # E :
+ subq t2, 1, t6 # E : (stall)
+ or t6, t2, t8 # E : (stall)
+ zapnot t12, t8, t12 # U : prepare source word; mirror changes (stall)
+
+ zapnot t1, t8, t1 # U : to source validity mask
+ andnot t0, t12, t0 # E : zero place for source to reside
+ or t0, t1, t0 # E : and put it there (stall both t0, t1)
+ stq_u t0, 0(a0) # L : (stall)
+
+ ret (t9) # L0 : Latency=3
+ nop
+ nop
+ nop
+
+ .end __stxncpy
diff --git a/arch/alpha/lib/ev67-strcat.S b/arch/alpha/lib/ev67-strcat.S
new file mode 100644
index 000000000000..c426fe3ed72f
--- /dev/null
+++ b/arch/alpha/lib/ev67-strcat.S
@@ -0,0 +1,54 @@
+/*
+ * arch/alpha/lib/ev67-strcat.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Append a null-terminated string from SRC to DST.
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ * Commentary: It seems bogus to walk the input string twice - once
+ * to determine the length, and then again while doing the copy.
+ * A significant (future) enhancement would be to only read the input
+ * string once.
+ */
+
+
+ .text
+
+ .align 4
+ .globl strcat
+ .ent strcat
+strcat:
+ .frame $30, 0, $26
+ .prologue 0
+
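+ /*
+ * insqh of -1 below plants 0xff in the bytes that precede the
+ * (possibly misaligned) start of the string, so cmpbge cannot see
+ * a false null in the leading garbage.
+ */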
+ mov $16, $0 # E : set up return value
+ /* Find the end of the string. */
+ ldq_u $1, 0($16) # L : load first quadword (a0 may be misaligned)
+ lda $2, -1 # E :
+ insqh $2, $16, $2 # U :
+
+ andnot $16, 7, $16 # E :
+ or $2, $1, $1 # E :
+ cmpbge $31, $1, $2 # E : bits set iff byte == 0
+ bne $2, $found # U :
+
+$loop: ldq $1, 8($16) # L :
+ addq $16, 8, $16 # E :
+ cmpbge $31, $1, $2 # E :
+ beq $2, $loop # U :
+
+$found: cttz $2, $3 # U0 :
+ addq $16, $3, $16 # E :
+ /* Now do the append. */
+ mov $26, $23 # E : return address for __stxcpy goes in t9 ($23)
+ br __stxcpy # L0 :
+
+ .end strcat
diff --git a/arch/alpha/lib/ev67-strchr.S b/arch/alpha/lib/ev67-strchr.S
new file mode 100644
index 000000000000..fbb7b4ffade9
--- /dev/null
+++ b/arch/alpha/lib/ev67-strchr.S
@@ -0,0 +1,88 @@
+/*
+ * arch/alpha/lib/ev67-strchr.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Return the address of a given character within a null-terminated
+ * string, or null if it is not found.
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ */
+
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+ .align 4
+ .globl strchr
+ .ent strchr
+strchr:
+ .frame sp, 0, ra
+ .prologue 0
+
+ ldq_u t0, 0(a0) # L : load first quadword Latency=3
+ and a1, 0xff, t3 # E : 00000000000000ch
+ insbl a1, 1, t5 # U : 000000000000ch00
+ insbl a1, 7, a2 # U : ch00000000000000
+
+ insbl t3, 6, a3 # U : 00ch000000000000
+ or t5, t3, a1 # E : 000000000000chch
+ andnot a0, 7, v0 # E : align our loop pointer
+ lda t4, -1 # E : build garbage mask
+
+ mskqh t4, a0, t4 # U : only want relevant part of first quad
+ or a2, a3, a2 # E : chch000000000000
+ inswl a1, 2, t5 # E : 00000000chch0000
+ inswl a1, 4, a3 # E : 0000chch00000000
+
+ or a1, a2, a1 # E : chch00000000chch
+ or a3, t5, t5 # E : 0000chchchch0000
+ cmpbge zero, t0, t2 # E : bits set iff byte == zero
+ cmpbge zero, t4, t4 # E : bits set iff byte is garbage
+
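+ /* t4 now flags the bytes before the start of the string; the
+ andnot below drops them from the match mask so leading garbage
+ can satisfy neither the null test nor the char test. */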
+ /* This quad is _very_ serialized. Lots of stalling happens */
+ or t5, a1, a1 # E : chchchchchchchch
+ xor t0, a1, t1 # E : make bytes == c zero
+ cmpbge zero, t1, t3 # E : bits set iff byte == c
+ or t2, t3, t0 # E : bits set iff char match or zero match
+
+ andnot t0, t4, t0 # E : clear garbage bits
+ cttz t0, a2 # U0 : speculative (in case we get a match)
+ nop # E :
+ bne t0, $found # U :
+
+ /*
+ * Yuk. This loop is going to stall like crazy waiting for the
+ * data to be loaded. Not much can be done about it unless it's
+ * unrolled multiple times - is that safe to do in kernel space?
+ * Or would exception handling recovery code do the trick here?
+ */
+$loop: ldq t0, 8(v0) # L : Latency=3
+ addq v0, 8, v0 # E :
+ xor t0, a1, t1 # E :
+ cmpbge zero, t0, t2 # E : bits set iff byte == 0
+
+ cmpbge zero, t1, t3 # E : bits set iff byte == c
+ or t2, t3, t0 # E :
+ cttz t3, a2 # U0 : speculative (in case we get a match)
+ beq t0, $loop # U :
+
+$found: negq t0, t1 # E : clear all but least set bit
+ and t0, t1, t0 # E :
+ and t0, t3, t1 # E : bit set iff byte was the char
+ addq v0, a2, v0 # E : Add in the bit number from above
+
+ cmoveq t1, $31, v0 # E : Two mapping slots, latency = 2
+ nop
+ nop
+ ret # L0 :
+
+ .end strchr
diff --git a/arch/alpha/lib/ev67-strlen.S b/arch/alpha/lib/ev67-strlen.S
new file mode 100644
index 000000000000..503928072523
--- /dev/null
+++ b/arch/alpha/lib/ev67-strlen.S
@@ -0,0 +1,49 @@
+/*
+ * arch/alpha/lib/ev67-strlen.S
+ * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Finds length of a 0-terminated string. Optimized for the
+ * Alpha architecture:
+ *
+ * - memory accessed as aligned quadwords only
+ *	- uses cmpbge to compare 8 bytes in parallel
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ */
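+
+/*
+ * Once cmpbge produces a nonzero per-byte mask, cttz (an EV67/CIX
+ * count-trailing-zeros instruction) turns it directly into the byte
+ * index of the terminator; the returned length is, in C terms,
+ *
+ *	len = ((char *)p + ctz(mask)) - s;
+ *
+ * where p is the aligned quadword pointer the loop stopped at.
+ */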
+
+ .set noreorder
+ .set noat
+
+ .globl strlen
+ .ent strlen
+ .align 4
+strlen:
+ ldq_u $1, 0($16) # L : load first quadword ($16 may be misaligned)
+ lda $2, -1($31) # E :
+ insqh $2, $16, $2 # U :
+ andnot $16, 7, $0 # E :
+
+ or $2, $1, $1 # E :
+ cmpbge $31, $1, $2 # E : $2 <- bitmask: bit i == 1 <==> i-th byte == 0
+ nop # E :
+ bne $2, $found # U :
+
+$loop: ldq $1, 8($0) # L :
+ addq $0, 8, $0 # E : addr += 8
+ cmpbge $31, $1, $2 # E :
+ beq $2, $loop # U :
+
+$found:
+ cttz $2, $3 # U0 :
+ addq $0, $3, $0 # E :
+ subq $0, $16, $0 # E :
+ ret $31, ($26) # L0 :
+
+ .end strlen
diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S
new file mode 100644
index 000000000000..57e0d77b81a6
--- /dev/null
+++ b/arch/alpha/lib/ev67-strlen_user.S
@@ -0,0 +1,107 @@
+/*
+ * arch/alpha/lib/ev67-strlen_user.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
+ *
+ * Return the length of the string including the NUL terminator
+ * (strlen+1) or zero if an error occurred.
+ *
+ * In places where it is critical to limit the processing time,
+ * and the data is not trusted, strnlen_user() should be used.
+ * It will return a value greater than its second argument if
+ * that limit would be exceeded. This implementation is allowed
+ * to access memory beyond the limit, but will not cross a page
+ * boundary when doing so.
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ */
+
+#include <asm/regdef.h>
+
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EX(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda v0, $exception-99b(zero); \
+ .previous
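+
+/*
+ * Per the alpha __ex_table fixup convention (see asm/uaccess.h), the
+ * "lda v0, $exception-99b(zero)" above is a fixup descriptor, never
+ * executed: on a fault its fields tell the handler to zero v0 and
+ * resume at $exception, which is how a faulting load becomes the
+ * documented zero return.
+ */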
+
+
+ .set noreorder
+ .set noat
+ .text
+
+ .globl __strlen_user
+ .ent __strlen_user
+ .frame sp, 0, ra
+
+ .align 4
+__strlen_user:
+ ldah a1, 32767(zero) # do not use plain strlen_user() for strings
+ # that might be almost 2 GB long; you should
+ # be using strnlen_user() instead
+ nop
+ nop
+ nop
+
+ .globl __strnlen_user
+
+ .align 4
+__strnlen_user:
+ .prologue 0
+ EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned)
+ lda t1, -1(zero) # E :
+
+ insqh t1, a0, t1 # U :
+ andnot a0, 7, v0 # E :
+ or t1, t0, t0 # E :
+ subq a0, 1, a0 # E : get our +1 for the return
+
+ cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0
+ subq a1, 7, t2 # E :
+ subq a0, v0, t0 # E :
+ bne t1, $found # U :
+
+ addq t2, t0, t2 # E :
+ addq a1, 1, a1 # E :
+ nop # E :
+ nop # E :
+
+ .align 4
+$loop: ble t2, $limit # U :
+ EX( ldq t0, 8(v0) ) # L :
+ nop # E :
+ nop # E :
+
+ cmpbge zero, t0, t1 # E :
+ subq t2, 8, t2 # E :
+ addq v0, 8, v0 # E : addr += 8
+ beq t1, $loop # U :
+
+$found: cttz t1, t2 # U0 :
+ addq v0, t2, v0 # E :
+ subq v0, a0, v0 # E :
+ ret # L0 :
+
+$exception:
+ nop
+ nop
+ nop
+ ret
+
+ .align 4 # currently redundant
+$limit:
+ nop
+ nop
+ subq a1, t2, v0
+ ret
+
+ .end __strlen_user
diff --git a/arch/alpha/lib/ev67-strncat.S b/arch/alpha/lib/ev67-strncat.S
new file mode 100644
index 000000000000..4ae716cd2bfb
--- /dev/null
+++ b/arch/alpha/lib/ev67-strncat.S
@@ -0,0 +1,94 @@
+/*
+ * arch/alpha/lib/ev67-strncat.S
+ * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
+ *
+ * Append no more than COUNT characters from the null-terminated string SRC
+ * to the null-terminated string DST. Always null-terminate the new DST.
+ *
+ * This differs slightly from the semantics in libc in that we never write
+ * past count, whereas libc may write to count+1. This follows the generic
+ * implementation in lib/string.c and is, IMHO, more sensible.
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ * Try not to change the actual algorithm if possible for consistency.
+ */
+
+
+ .text
+
+ .align 4
+ .globl strncat
+ .ent strncat
+strncat:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set up return value
+ beq $18, $zerocount # U :
+ /* Find the end of the string. */
+ ldq_u $1, 0($16) # L : load first quadword ($16 may be misaligned)
+ lda $2, -1($31) # E :
+
+ insqh $2, $0, $2 # U :
+ andnot $16, 7, $16 # E :
+ nop # E :
+ or $2, $1, $1 # E :
+
+ nop # E :
+ nop # E :
+ cmpbge $31, $1, $2 # E : bits set iff byte == 0
+ bne $2, $found # U :
+
+$loop: ldq $1, 8($16) # L :
+ addq $16, 8, $16 # E :
+ cmpbge $31, $1, $2 # E :
+ beq $2, $loop # U :
+
+$found: cttz $2, $3 # U0 :
+ addq $16, $3, $16 # E :
+ nop # E :
+	bsr $23, __stxncpy	# L0 : /* Now do the append. */
+
+ /* Worry about the null termination. */
+
+ zapnot $1, $27, $2 # U : was last byte a null?
+ cmplt $27, $24, $5 # E : did we fill the buffer completely?
+ bne $2, 0f # U :
+ ret # L0 :
+
+0: or $5, $18, $2 # E :
+ nop
+ bne $2, 2f # U :
+ and $24, 0x80, $3 # E : no zero next byte
+
+ nop # E :
+ bne $3, 1f # U :
+ /* Here there are bytes left in the current word. Clear one. */
+ addq $24, $24, $24 # E : end-of-count bit <<= 1
+ nop # E :
+
+2: zap $1, $24, $1 # U :
+ nop # E :
+ stq_u $1, 0($16) # L :
+ ret # L0 :
+
+1: /* Here we must clear the first byte of the next DST word */
+ stb $31, 8($16) # L :
+ nop # E :
+ nop # E :
+ ret # L0 :
+
+$zerocount:
+ nop # E :
+ nop # E :
+ nop # E :
+ ret # L0 :
+
+ .end strncat
diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S
new file mode 100644
index 000000000000..3fd8bf414c7b
--- /dev/null
+++ b/arch/alpha/lib/ev67-strrchr.S
@@ -0,0 +1,109 @@
+/*
+ * arch/alpha/lib/ev67-strrchr.S
+ * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com>
+ *
+ * Return the address of the last occurrence of a given character
+ * within a null-terminated string, or null if it is not found.
+ * Optimized for the Alpha architecture:
+ *
+ *	- memory accessed as aligned quadwords only
+ *	- uses cmpbge to compare 8 bytes in parallel
+ *
+ * Much of the information about 21264 scheduling/coding comes from:
+ * Compiler Writer's Guide for the Alpha 21264
+ * abbreviated as 'CWG' in other comments here
+ * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
+ * Scheduling notation:
+ * E - either cluster
+ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
+ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
+ */
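+
+/*
+ * Because this is a reverse search, the final answer comes from the
+ * most significant set bit of the saved per-byte match mask; the
+ * closing ctlz/subq/addq sequence computes, in C terms,
+ *
+ *	v0 = base + (63 - clz(mask));
+ *
+ * with the cmoveq folding the no-match case into a null return.
+ */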
+
+
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+ .align 4
+ .ent strrchr
+ .globl strrchr
+strrchr:
+ .frame sp, 0, ra
+ .prologue 0
+
+ and a1, 0xff, t2 # E : 00000000000000ch
+ insbl a1, 1, t4 # U : 000000000000ch00
+ insbl a1, 2, t5 # U : 0000000000ch0000
+ ldq_u t0, 0(a0) # L : load first quadword Latency=3
+
+ mov zero, t6 # E : t6 is last match aligned addr
+ or t2, t4, a1 # E : 000000000000chch
+ sll t5, 8, t3 # U : 00000000ch000000
+ mov zero, t8 # E : t8 is last match byte compare mask
+
+ andnot a0, 7, v0 # E : align source addr
+ or t5, t3, t3 # E : 00000000chch0000
+ sll a1, 32, t2 # U : 0000chch00000000
+ sll a1, 48, t4 # U : chch000000000000
+
+ or t4, a1, a1 # E : chch00000000chch
+ or t2, t3, t2 # E : 0000chchchch0000
+ or a1, t2, a1 # E : chchchchchchchch
+ lda t5, -1 # E : build garbage mask
+
+ cmpbge zero, t0, t1 # E : bits set iff byte == zero
+ mskqh t5, a0, t4 # E : Complete garbage mask
+ xor t0, a1, t2 # E : make bytes == c zero
+ cmpbge zero, t4, t4 # E : bits set iff byte is garbage
+
+ cmpbge zero, t2, t3 # E : bits set iff byte == c
+ andnot t1, t4, t1 # E : clear garbage from null test
+ andnot t3, t4, t3 # E : clear garbage from char test
+ bne t1, $eos # U : did we already hit the terminator?
+
+ /* Character search main loop */
+$loop:
+ ldq t0, 8(v0) # L : load next quadword
+ cmovne t3, v0, t6 # E : save previous comparisons match
+ nop # : Latency=2, extra map slot (keep nop with cmov)
+ nop
+
+ cmovne t3, t3, t8 # E : Latency=2, extra map slot
+ nop # : keep with cmovne
+ addq v0, 8, v0 # E :
+ xor t0, a1, t2 # E :
+
+ cmpbge zero, t0, t1 # E : bits set iff byte == zero
+ cmpbge zero, t2, t3 # E : bits set iff byte == c
+	beq t1, $loop	# U : if we haven't seen a null, loop
+ nop
+
+ /* Mask out character matches after terminator */
+$eos:
+ negq t1, t4 # E : isolate first null byte match
+ and t1, t4, t4 # E :
+	subq t4, 1, t5	# E : build a mask of the bytes up to...
+ or t4, t5, t4 # E : ... and including the null
+
+ and t3, t4, t3 # E : mask out char matches after null
+ cmovne t3, t3, t8 # E : save it, if match found Latency=2, extra map slot
+ nop # : Keep with cmovne
+ nop
+
+ cmovne t3, v0, t6 # E :
+ nop # : Keep with cmovne
+ /* Locate the address of the last matched character */
+ ctlz t8, t2 # U0 : Latency=3 (0x40 for t8=0)
+ nop
+
+ cmoveq t8, 0x3f, t2 # E : Compensate for case when no match is seen
+ nop # E : hide the cmov latency (2) behind ctlz latency
+ lda t5, 0x3f($31) # E :
+ subq t5, t2, t5 # E : Normalize leading zero count
+
+ addq t6, t5, v0 # E : and add to quadword address
+ ret # L0 : Latency=3
+ nop
+ nop
+
+ .end strrchr
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
new file mode 100644
index 000000000000..97c4d9d7a4d5
--- /dev/null
+++ b/arch/alpha/lib/fpreg.c
@@ -0,0 +1,193 @@
+/*
+ * arch/alpha/lib/fpreg.c
+ *
+ * (C) Copyright 1998 Linus Torvalds
+ */
+
+#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
+#else
+#define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val));
+#endif
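+
+/*
+ * The floating-point register number is part of the instruction
+ * encoding and must be a constant at assembly time, hence the
+ * exhaustive 32-way switches below instead of a computed operand.
+ */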
+
+unsigned long
+alpha_read_fp_reg (unsigned long reg)
+{
+ unsigned long val;
+
+ switch (reg) {
+ case 0: STT( 0, val); break;
+ case 1: STT( 1, val); break;
+ case 2: STT( 2, val); break;
+ case 3: STT( 3, val); break;
+ case 4: STT( 4, val); break;
+ case 5: STT( 5, val); break;
+ case 6: STT( 6, val); break;
+ case 7: STT( 7, val); break;
+ case 8: STT( 8, val); break;
+ case 9: STT( 9, val); break;
+ case 10: STT(10, val); break;
+ case 11: STT(11, val); break;
+ case 12: STT(12, val); break;
+ case 13: STT(13, val); break;
+ case 14: STT(14, val); break;
+ case 15: STT(15, val); break;
+ case 16: STT(16, val); break;
+ case 17: STT(17, val); break;
+ case 18: STT(18, val); break;
+ case 19: STT(19, val); break;
+ case 20: STT(20, val); break;
+ case 21: STT(21, val); break;
+ case 22: STT(22, val); break;
+ case 23: STT(23, val); break;
+ case 24: STT(24, val); break;
+ case 25: STT(25, val); break;
+ case 26: STT(26, val); break;
+ case 27: STT(27, val); break;
+ case 28: STT(28, val); break;
+ case 29: STT(29, val); break;
+ case 30: STT(30, val); break;
+ case 31: STT(31, val); break;
+ default: return 0;
+ }
+ return val;
+}
+
+#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val));
+#else
+#define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val));
+#endif
+
+void
+alpha_write_fp_reg (unsigned long reg, unsigned long val)
+{
+ switch (reg) {
+ case 0: LDT( 0, val); break;
+ case 1: LDT( 1, val); break;
+ case 2: LDT( 2, val); break;
+ case 3: LDT( 3, val); break;
+ case 4: LDT( 4, val); break;
+ case 5: LDT( 5, val); break;
+ case 6: LDT( 6, val); break;
+ case 7: LDT( 7, val); break;
+ case 8: LDT( 8, val); break;
+ case 9: LDT( 9, val); break;
+ case 10: LDT(10, val); break;
+ case 11: LDT(11, val); break;
+ case 12: LDT(12, val); break;
+ case 13: LDT(13, val); break;
+ case 14: LDT(14, val); break;
+ case 15: LDT(15, val); break;
+ case 16: LDT(16, val); break;
+ case 17: LDT(17, val); break;
+ case 18: LDT(18, val); break;
+ case 19: LDT(19, val); break;
+ case 20: LDT(20, val); break;
+ case 21: LDT(21, val); break;
+ case 22: LDT(22, val); break;
+ case 23: LDT(23, val); break;
+ case 24: LDT(24, val); break;
+ case 25: LDT(25, val); break;
+ case 26: LDT(26, val); break;
+ case 27: LDT(27, val); break;
+ case 28: LDT(28, val); break;
+ case 29: LDT(29, val); break;
+ case 30: LDT(30, val); break;
+ case 31: LDT(31, val); break;
+ }
+}
+
+#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val));
+#else
+#define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val));
+#endif
+
+unsigned long
+alpha_read_fp_reg_s (unsigned long reg)
+{
+ unsigned long val;
+
+ switch (reg) {
+ case 0: STS( 0, val); break;
+ case 1: STS( 1, val); break;
+ case 2: STS( 2, val); break;
+ case 3: STS( 3, val); break;
+ case 4: STS( 4, val); break;
+ case 5: STS( 5, val); break;
+ case 6: STS( 6, val); break;
+ case 7: STS( 7, val); break;
+ case 8: STS( 8, val); break;
+ case 9: STS( 9, val); break;
+ case 10: STS(10, val); break;
+ case 11: STS(11, val); break;
+ case 12: STS(12, val); break;
+ case 13: STS(13, val); break;
+ case 14: STS(14, val); break;
+ case 15: STS(15, val); break;
+ case 16: STS(16, val); break;
+ case 17: STS(17, val); break;
+ case 18: STS(18, val); break;
+ case 19: STS(19, val); break;
+ case 20: STS(20, val); break;
+ case 21: STS(21, val); break;
+ case 22: STS(22, val); break;
+ case 23: STS(23, val); break;
+ case 24: STS(24, val); break;
+ case 25: STS(25, val); break;
+ case 26: STS(26, val); break;
+ case 27: STS(27, val); break;
+ case 28: STS(28, val); break;
+ case 29: STS(29, val); break;
+ case 30: STS(30, val); break;
+ case 31: STS(31, val); break;
+ default: return 0;
+ }
+ return val;
+}
+
+#if defined(__alpha_cix__) || defined(__alpha_fix__)
+#define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val));
+#else
+#define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val));
+#endif
+
+void
+alpha_write_fp_reg_s (unsigned long reg, unsigned long val)
+{
+ switch (reg) {
+ case 0: LDS( 0, val); break;
+ case 1: LDS( 1, val); break;
+ case 2: LDS( 2, val); break;
+ case 3: LDS( 3, val); break;
+ case 4: LDS( 4, val); break;
+ case 5: LDS( 5, val); break;
+ case 6: LDS( 6, val); break;
+ case 7: LDS( 7, val); break;
+ case 8: LDS( 8, val); break;
+ case 9: LDS( 9, val); break;
+ case 10: LDS(10, val); break;
+ case 11: LDS(11, val); break;
+ case 12: LDS(12, val); break;
+ case 13: LDS(13, val); break;
+ case 14: LDS(14, val); break;
+ case 15: LDS(15, val); break;
+ case 16: LDS(16, val); break;
+ case 17: LDS(17, val); break;
+ case 18: LDS(18, val); break;
+ case 19: LDS(19, val); break;
+ case 20: LDS(20, val); break;
+ case 21: LDS(21, val); break;
+ case 22: LDS(22, val); break;
+ case 23: LDS(23, val); break;
+ case 24: LDS(24, val); break;
+ case 25: LDS(25, val); break;
+ case 26: LDS(26, val); break;
+ case 27: LDS(27, val); break;
+ case 28: LDS(28, val); break;
+ case 29: LDS(29, val); break;
+ case 30: LDS(30, val); break;
+ case 31: LDS(31, val); break;
+ }
+}
diff --git a/arch/alpha/lib/memchr.S b/arch/alpha/lib/memchr.S
new file mode 100644
index 000000000000..14427eeb555e
--- /dev/null
+++ b/arch/alpha/lib/memchr.S
@@ -0,0 +1,164 @@
+/* Copyright (C) 1996 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by David Mosberger (davidm@cs.arizona.edu).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* Finds characters in a memory area. Optimized for the Alpha:
+
+ - memory accessed as aligned quadwords only
+ - uses cmpbge to compare 8 bytes in parallel
+ - does binary search to find 0 byte in last
+ quadword (HAKMEM needed 12 instructions to
+ do this instead of the 9 instructions that
+ binary search needs).
+
+For correctness consider that:
+
+ - only minimum number of quadwords may be accessed
+ - the third argument is an unsigned long
+*/
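+
+/* The binary search at $found_it finds the least significant set bit
+   of the cmpbge result without a loop; schematically, in C:
+
+	m &= -m;			(isolate the lowest set bit)
+	i = 0;
+	if (!(m & 0x0f)) i += 4;	(is the match in the high nibble?)
+	if (!(m & 0x33)) i += 2;
+	if (!(m & 0x55)) i += 1;	(i = byte index of the match)
+*/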
+
+ .set noreorder
+ .set noat
+
+ .globl memchr
+ .ent memchr
+memchr:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ # Hack -- if someone passes in (size_t)-1, hoping to just
+	# search till the end of the address space, we will overflow
+ # below when we find the address of the last byte. Given
+ # that we will never have a 56-bit address space, cropping
+ # the length is the easiest way to avoid trouble.
+ zap $18, 0x80, $5 #-e0 :
+
+ beq $18, $not_found # .. e1 :
+ ldq_u $1, 0($16) # e1 : load first quadword
+ insbl $17, 1, $2 # .. e0 : $2 = 000000000000ch00
+ and $17, 0xff, $17 #-e0 : $17 = 00000000000000ch
+ cmpult $18, 9, $4 # .. e1 :
+ or $2, $17, $17 # e0 : $17 = 000000000000chch
+ lda $3, -1($31) # .. e1 :
+ sll $17, 16, $2 #-e0 : $2 = 00000000chch0000
+ addq $16, $5, $5 # .. e1 :
+ or $2, $17, $17 # e1 : $17 = 00000000chchchch
+ unop # :
+ sll $17, 32, $2 #-e0 : $2 = chchchch00000000
+ or $2, $17, $17 # e1 : $17 = chchchchchchchch
+ extql $1, $16, $7 # e0 :
+ beq $4, $first_quad # .. e1 :
+
+	ldq_u $6, -1($5)	#-e1 : eight or fewer bytes to search
+ extqh $6, $16, $6 # .. e0 :
+ mov $16, $0 # e0 :
+ or $7, $6, $1 # .. e1 : $1 = quadword starting at $16
+
+ # Deal with the case where at most 8 bytes remain to be searched
+ # in $1. E.g.:
+ # $18 = 6
+ # $1 = ????c6c5c4c3c2c1
+$last_quad:
+ negq $18, $6 #-e0 :
+ xor $17, $1, $1 # .. e1 :
+ srl $3, $6, $6 # e0 : $6 = mask of $18 bits set
+ cmpbge $31, $1, $2 # .. e1 :
+ and $2, $6, $2 #-e0 :
+ beq $2, $not_found # .. e1 :
+
+$found_it:
+ # Now, determine which byte matched:
+ negq $2, $3 # e0 :
+ and $2, $3, $2 # e1 :
+
+ and $2, 0x0f, $1 #-e0 :
+ addq $0, 4, $3 # .. e1 :
+ cmoveq $1, $3, $0 # e0 :
+
+ addq $0, 2, $3 # .. e1 :
+ and $2, 0x33, $1 #-e0 :
+ cmoveq $1, $3, $0 # .. e1 :
+
+ and $2, 0x55, $1 # e0 :
+ addq $0, 1, $3 # .. e1 :
+ cmoveq $1, $3, $0 #-e0 :
+
+$done: ret # .. e1 :
+
+ # Deal with the case where $18 > 8 bytes remain to be
+ # searched. $16 may not be aligned.
+ .align 4
+$first_quad:
+ andnot $16, 0x7, $0 #-e1 :
+ insqh $3, $16, $2 # .. e0 : $2 = 0000ffffffffffff ($16<0:2> ff)
+ xor $1, $17, $1 # e0 :
+ or $1, $2, $1 # e1 : $1 = ====ffffffffffff
+ cmpbge $31, $1, $2 #-e0 :
+ bne $2, $found_it # .. e1 :
+
+ # At least one byte left to process.
+
+ ldq $1, 8($0) # e0 :
+ subq $5, 1, $18 # .. e1 :
+ addq $0, 8, $0 #-e0 :
+
+ # Make $18 point to last quad to be accessed (the
+ # last quad may or may not be partial).
+
+ andnot $18, 0x7, $18 # .. e1 :
+ cmpult $0, $18, $2 # e0 :
+ beq $2, $final # .. e1 :
+
+ # At least two quads remain to be accessed.
+
+ subq $18, $0, $4 #-e0 : $4 <- nr quads to be processed
+ and $4, 8, $4 # e1 : odd number of quads?
+ bne $4, $odd_quad_count # e1 :
+
+ # At least three quads remain to be accessed
+
+ mov $1, $4 # e0 : move prefetched value to correct reg
+
+ .align 4
+$unrolled_loop:
+ ldq $1, 8($0) #-e0 : prefetch $1
+ xor $17, $4, $2 # .. e1 :
+ cmpbge $31, $2, $2 # e0 :
+ bne $2, $found_it # .. e1 :
+
+ addq $0, 8, $0 #-e0 :
+$odd_quad_count:
+ xor $17, $1, $2 # .. e1 :
+ ldq $4, 8($0) # e0 : prefetch $4
+ cmpbge $31, $2, $2 # .. e1 :
+ addq $0, 8, $6 #-e0 :
+ bne $2, $found_it # .. e1 :
+
+ cmpult $6, $18, $6 # e0 :
+ addq $0, 8, $0 # .. e1 :
+ bne $6, $unrolled_loop #-e1 :
+
+ mov $4, $1 # e0 : move prefetched value into $1
+$final: subq $5, $0, $18 # .. e1 : $18 <- number of bytes left to do
+ bne $18, $last_quad # e1 :
+
+$not_found:
+ mov $31, $0 #-e0 :
+ ret # .. e1 :
+
+ .end memchr
diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c
new file mode 100644
index 000000000000..64083fc73238
--- /dev/null
+++ b/arch/alpha/lib/memcpy.c
@@ -0,0 +1,163 @@
+/*
+ * linux/arch/alpha/lib/memcpy.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This is a reasonably optimized memcpy() routine.
+ */
+
+/*
+ * Note that the C code is written to be optimized into good assembly. However,
+ * at this point gcc is unable to sanely compile "if (n >= 0)", resulting in an
+ * explicit compare against 0 (instead of just using the proper "blt reg, xx" or
+ * "bge reg, xx"). I hope alpha-gcc will be fixed to notice this eventually..
+ */
+
+#include <linux/types.h>
+
+/*
+ * This should be done in one go with ldq_u*2/mask/stq_u. Do it
+ * with a macro so that we can fix it up later..
+ */
+#define ALIGN_DEST_TO8_UP(d,s,n) \
+ while (d & 7) { \
+ if (n <= 0) return; \
+ n--; \
+ *(char *) d = *(char *) s; \
+ d++; s++; \
+ }
+#define ALIGN_DEST_TO8_DN(d,s,n) \
+ while (d & 7) { \
+ if (n <= 0) return; \
+ n--; \
+ d--; s--; \
+ *(char *) d = *(char *) s; \
+ }
+
+/*
+ * This should similarly be done with ldq_u*2/mask/stq. The destination
+ * is aligned, but we don't fill in a full quad-word
+ */
+#define DO_REST_UP(d,s,n) \
+ while (n > 0) { \
+ n--; \
+ *(char *) d = *(char *) s; \
+ d++; s++; \
+ }
+#define DO_REST_DN(d,s,n) \
+ while (n > 0) { \
+ n--; \
+ d--; s--; \
+ *(char *) d = *(char *) s; \
+ }
+
+/*
+ * This should be done with ldq/mask/stq. The source and destination are
+ * aligned, but we don't fill in a full quad-word
+ */
+#define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n)
+#define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n)
+
+/*
+ * This does unaligned memory copies. We want to avoid storing to
+ * an unaligned address, as that would do a read-modify-write cycle.
+ * We also want to avoid reading each unaligned source word twice.
+ *
+ * Note the ordering to try to avoid load (and address generation) latencies.
+ */
+static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s,
+ long n)
+{
+ ALIGN_DEST_TO8_UP(d,s,n);
+ n -= 8; /* to avoid compare against 8 in the loop */
+ if (n >= 0) {
+ unsigned long low_word, high_word;
+ __asm__("ldq_u %0,%1":"=r" (low_word):"m" (*(unsigned long *) s));
+ do {
+ unsigned long tmp;
+ __asm__("ldq_u %0,%1":"=r" (high_word):"m" (*(unsigned long *)(s+8)));
+ n -= 8;
+ __asm__("extql %1,%2,%0"
+ :"=r" (low_word)
+ :"r" (low_word), "r" (s));
+ __asm__("extqh %1,%2,%0"
+ :"=r" (tmp)
+ :"r" (high_word), "r" (s));
+ s += 8;
+ *(unsigned long *) d = low_word | tmp;
+ d += 8;
+ low_word = high_word;
+ } while (n >= 0);
+ }
+ n += 8;
+ DO_REST_UP(d,s,n);
+}
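+
+/*
+ * For a source misaligned by o bytes (o != 0), the extql/extqh pair
+ * above assembles each aligned store from two source quadwords;
+ * roughly, in C (little-endian):
+ *
+ *	*dst++ = (low_word >> 8*o) | (high_word << 8*(8 - o));
+ *
+ * so every source byte is loaded exactly once.
+ */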
+
+static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s,
+ long n)
+{
+ /* I don't understand AXP assembler well enough for this. -Tim */
+ s += n;
+ d += n;
+ while (n--)
+ * (char *) --d = * (char *) --s;
+}
+
+/*
+ * Hmm.. Strange. The __asm__ here is there to make gcc use an integer register
+ * for the load-store. I don't know why, but using a floating-point register
+ * for the move seems to slow things down (very small difference, though).
+ *
+ * Note the ordering to try to avoid load (and address generation) latencies.
+ */
+static inline void __memcpy_aligned_up (unsigned long d, unsigned long s,
+ long n)
+{
+ ALIGN_DEST_TO8_UP(d,s,n);
+ n -= 8;
+ while (n >= 0) {
+ unsigned long tmp;
+ __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
+ n -= 8;
+ s += 8;
+ *(unsigned long *) d = tmp;
+ d += 8;
+ }
+ n += 8;
+ DO_REST_ALIGNED_UP(d,s,n);
+}
+static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s,
+ long n)
+{
+ s += n;
+ d += n;
+ ALIGN_DEST_TO8_DN(d,s,n);
+ n -= 8;
+ while (n >= 0) {
+ unsigned long tmp;
+ s -= 8;
+ __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
+ n -= 8;
+ d -= 8;
+ *(unsigned long *) d = tmp;
+ }
+ n += 8;
+ DO_REST_ALIGNED_DN(d,s,n);
+}
+
+void * memcpy(void * dest, const void *src, size_t n)
+{
+ if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
+ __memcpy_aligned_up ((unsigned long) dest, (unsigned long) src,
+ n);
+ return dest;
+ }
+ __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
+ return dest;
+}
+
+/* For backward modules compatibility, define __memcpy. */
+asm("__memcpy = memcpy; .globl __memcpy");
diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S
new file mode 100644
index 000000000000..eb3b6e02242f
--- /dev/null
+++ b/arch/alpha/lib/memmove.S
@@ -0,0 +1,181 @@
+/*
+ * arch/alpha/lib/memmove.S
+ *
+ * Barely optimized memmove routine for Alpha EV5.
+ *
+ * This is hand-massaged output from the original memcpy.c. We defer to
+ * memcpy whenever possible; the backwards copy loops are not unrolled.
+ */
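+
+/*
+ * The dispatch below amounts to:
+ *
+ *	if (dst + n <= src || src + n <= dst)
+ *		return memcpy(dst, src, n);	(no overlap)
+ *	if (dst <= src)
+ *		copy forwards ($memmove_up);
+ *	else
+ *		copy backwards (fall through);
+ */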
+
+ .set noat
+ .set noreorder
+ .text
+
+ .align 4
+ .globl memmove
+ .ent memmove
+memmove:
+ ldgp $29, 0($27)
+ unop
+ nop
+ .prologue 1
+
+ addq $16,$18,$4
+ addq $17,$18,$5
+ cmpule $4,$17,$1 /* dest + n <= src */
+ cmpule $5,$16,$2 /* dest >= src + n */
+
+ bis $1,$2,$1
+ mov $16,$0
+ xor $16,$17,$2
+ bne $1,memcpy !samegp
+
+ and $2,7,$2 /* Test for src/dest co-alignment. */
+ and $16,7,$1
+ cmpule $16,$17,$3
+ bne $3,$memmove_up /* dest < src */
+
+ and $4,7,$1
+ bne $2,$misaligned_dn
+ unop
+ beq $1,$skip_aligned_byte_loop_head_dn
+
+$aligned_byte_loop_head_dn:
+ lda $4,-1($4)
+ lda $5,-1($5)
+ unop
+ ble $18,$egress
+
+ ldq_u $3,0($5)
+ ldq_u $2,0($4)
+ lda $18,-1($18)
+ extbl $3,$5,$1
+
+ insbl $1,$4,$1
+ mskbl $2,$4,$2
+ bis $1,$2,$1
+ and $4,7,$6
+
+ stq_u $1,0($4)
+ bne $6,$aligned_byte_loop_head_dn
+
+$skip_aligned_byte_loop_head_dn:
+ lda $18,-8($18)
+ blt $18,$skip_aligned_word_loop_dn
+
+$aligned_word_loop_dn:
+ ldq $1,-8($5)
+ nop
+ lda $5,-8($5)
+ lda $18,-8($18)
+
+ stq $1,-8($4)
+ nop
+ lda $4,-8($4)
+ bge $18,$aligned_word_loop_dn
+
+$skip_aligned_word_loop_dn:
+ lda $18,8($18)
+ bgt $18,$byte_loop_tail_dn
+ unop
+ ret $31,($26),1
+
+ .align 4
+$misaligned_dn:
+ nop
+ fnop
+ unop
+ beq $18,$egress
+
+$byte_loop_tail_dn:
+ ldq_u $3,-1($5)
+ ldq_u $2,-1($4)
+ lda $5,-1($5)
+ lda $4,-1($4)
+
+ lda $18,-1($18)
+ extbl $3,$5,$1
+ insbl $1,$4,$1
+ mskbl $2,$4,$2
+
+ bis $1,$2,$1
+ stq_u $1,0($4)
+ bgt $18,$byte_loop_tail_dn
+ br $egress
+
+$memmove_up:
+ mov $16,$4
+ mov $17,$5
+ bne $2,$misaligned_up
+ beq $1,$skip_aligned_byte_loop_head_up
+
+$aligned_byte_loop_head_up:
+ unop
+ ble $18,$egress
+ ldq_u $3,0($5)
+ ldq_u $2,0($4)
+
+ lda $18,-1($18)
+ extbl $3,$5,$1
+ insbl $1,$4,$1
+ mskbl $2,$4,$2
+
+ bis $1,$2,$1
+ lda $5,1($5)
+ stq_u $1,0($4)
+ lda $4,1($4)
+
+ and $4,7,$6
+ bne $6,$aligned_byte_loop_head_up
+
+$skip_aligned_byte_loop_head_up:
+ lda $18,-8($18)
+ blt $18,$skip_aligned_word_loop_up
+
+$aligned_word_loop_up:
+ ldq $1,0($5)
+ nop
+ lda $5,8($5)
+ lda $18,-8($18)
+
+ stq $1,0($4)
+ nop
+ lda $4,8($4)
+ bge $18,$aligned_word_loop_up
+
+$skip_aligned_word_loop_up:
+ lda $18,8($18)
+ bgt $18,$byte_loop_tail_up
+ unop
+ ret $31,($26),1
+
+ .align 4
+$misaligned_up:
+ nop
+ fnop
+ unop
+ beq $18,$egress
+
+$byte_loop_tail_up:
+ ldq_u $3,0($5)
+ ldq_u $2,0($4)
+ lda $18,-1($18)
+ extbl $3,$5,$1
+
+ insbl $1,$4,$1
+ mskbl $2,$4,$2
+ bis $1,$2,$1
+ stq_u $1,0($4)
+
+ lda $5,1($5)
+ lda $4,1($4)
+ nop
+ bgt $18,$byte_loop_tail_up
+
+$egress:
+ ret $31,($26),1
+ nop
+ nop
+ nop
+
+ .end memmove
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
new file mode 100644
index 000000000000..8ff6e7e1773e
--- /dev/null
+++ b/arch/alpha/lib/memset.S
@@ -0,0 +1,124 @@
+/*
+ * linux/arch/alpha/lib/memset.S
+ *
+ * This is an efficient (and small) implementation of the C library "memset()"
+ * function for the alpha.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ *
+ * This routine is "moral-ware": you are free to use it any way you wish, and
+ * the only obligation I put on you is a moral one: if you make any improvements
+ * to the routine, please send me your improvements for me to use similarly.
+ *
+ * The scheduling comments are according to the EV5 documentation (and done by
+ * hand, so they might well be incorrect; please do tell me about it..)
+ */
+
+ .set noat
+ .set noreorder
+.text
+ .globl memset
+ .globl __memset
+ .globl __memsetw
+ .globl __constant_c_memset
+ .ent __memset
+.align 5
+__memset:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ and $17,255,$1 /* E1 */
+ insbl $17,1,$17 /* .. E0 */
+ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */
+ sll $17,16,$1 /* E1 (p-c latency, next cycle) */
+
+ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */
+ sll $17,32,$1 /* E1 (p-c latency, next cycle) */
+ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */
+ ldq_u $31,0($30) /* .. E1 */
+
+.align 5
+__constant_c_memset:
+ addq $18,$16,$6 /* E0 */
+ bis $16,$16,$0 /* .. E1 */
+ xor $16,$6,$1 /* E0 */
+ ble $18,end /* .. E1 */
+
+ bic $1,7,$1 /* E0 */
+ beq $1,within_one_quad /* .. E1 (note EV5 zero-latency forwarding) */
+ and $16,7,$3 /* E0 */
+ beq $3,aligned /* .. E1 (note EV5 zero-latency forwarding) */
+
+ ldq_u $4,0($16) /* E0 */
+ bis $16,$16,$5 /* .. E1 */
+ insql $17,$16,$2 /* E0 */
+ subq $3,8,$3 /* .. E1 */
+
+ addq $18,$3,$18 /* E0 $18 is new count ($3 is negative) */
+ mskql $4,$16,$4 /* .. E1 (and possible load stall) */
+ subq $16,$3,$16 /* E0 $16 is new aligned destination */
+ bis $2,$4,$1 /* .. E1 */
+
+ bis $31,$31,$31 /* E0 */
+ ldq_u $31,0($30) /* .. E1 */
+ stq_u $1,0($5) /* E0 */
+ bis $31,$31,$31 /* .. E1 */
+
+.align 4
+aligned:
+ sra $18,3,$3 /* E0 */
+ and $18,7,$18 /* .. E1 */
+ bis $16,$16,$5 /* E0 */
+ beq $3,no_quad /* .. E1 */
+
+.align 3
+loop:
+ stq $17,0($5) /* E0 */
+ subq $3,1,$3 /* .. E1 */
+ addq $5,8,$5 /* E0 */
+ bne $3,loop /* .. E1 */
+
+no_quad:
+ bis $31,$31,$31 /* E0 */
+ beq $18,end /* .. E1 */
+ ldq $7,0($5) /* E0 */
+ mskqh $7,$6,$2 /* .. E1 (and load stall) */
+
+ insqh $17,$6,$4 /* E0 */
+ bis $2,$4,$1 /* .. E1 */
+ stq $1,0($5) /* E0 */
+ ret $31,($26),1 /* .. E1 */
+
+.align 3
+within_one_quad:
+ ldq_u $1,0($16) /* E0 */
+ insql $17,$16,$2 /* E1 */
+ mskql $1,$16,$4 /* E0 (after load stall) */
+ bis $2,$4,$2 /* E0 */
+
+ mskql $2,$6,$4 /* E0 */
+ mskqh $1,$6,$2 /* .. E1 */
+ bis $2,$4,$1 /* E0 */
+ stq_u $1,0($16) /* E0 */
+
+end:
+ ret $31,($26),1 /* E1 */
+ .end __memset
+
+ .align 5
+ .ent __memsetw
+__memsetw:
+ .prologue 0
+
+ inswl $17,0,$1 /* E0 */
+ inswl $17,2,$2 /* E0 */
+ inswl $17,4,$3 /* E0 */
+ or $1,$2,$1 /* .. E1 */
+ inswl $17,6,$4 /* E0 */
+ or $1,$3,$1 /* .. E1 */
+ or $1,$4,$17 /* E0 */
+ br __constant_c_memset /* .. E1 */
+
+ .end __memsetw
+
+memset = __memset
diff --git a/arch/alpha/lib/srm_printk.c b/arch/alpha/lib/srm_printk.c
new file mode 100644
index 000000000000..31b53c49435e
--- /dev/null
+++ b/arch/alpha/lib/srm_printk.c
@@ -0,0 +1,41 @@
+/*
+ * arch/alpha/lib/srm_printk.c
+ */
+
+#include <linux/kernel.h>
+#include <asm/console.h>
+
+long
+srm_printk(const char *fmt, ...)
+{
+ static char buf[1024];
+ va_list args;
+ long len, num_lf;
+ char *src, *dst;
+
+ va_start(args, fmt);
+ len = vsprintf(buf, fmt, args);
+ va_end(args);
+
+ /* count number of linefeeds in string: */
+
+ num_lf = 0;
+ for (src = buf; *src; ++src) {
+ if (*src == '\n') {
+ ++num_lf;
+ }
+ }
+
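+	/*
+	 * Expand in place, walking backwards from the terminator so no
+	 * byte is overwritten before it has been copied; dst starts
+	 * num_lf bytes past the old end to make room for the inserted
+	 * '\r' characters.
+	 */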
+ if (num_lf) {
+ /* expand each linefeed into carriage-return/linefeed: */
+ for (dst = src + num_lf; src >= buf; ) {
+ if (*src == '\n') {
+ *dst-- = '\r';
+ }
+ *dst-- = *src--;
+ }
+ }
+
+ srm_puts(buf, num_lf+len);
+ return len;
+}
diff --git a/arch/alpha/lib/srm_puts.c b/arch/alpha/lib/srm_puts.c
new file mode 100644
index 000000000000..7b60a6f75a78
--- /dev/null
+++ b/arch/alpha/lib/srm_puts.c
@@ -0,0 +1,23 @@
+/*
+ * arch/alpha/lib/srm_puts.c
+ */
+
+#include <linux/string.h>
+#include <asm/console.h>
+
+long
+srm_puts(const char *str, long len)
+{
+ long remaining, written;
+
+ if (!callback_init_done)
+ return len;
+
+ for (remaining = len; remaining > 0; remaining -= written)
+ {
+ written = callback_puts(0, str, remaining);
+ written &= 0xffffffff;
+ str += written;
+ }
+ return len;
+}
diff --git a/arch/alpha/lib/stacktrace.c b/arch/alpha/lib/stacktrace.c
new file mode 100644
index 000000000000..6d432e42aedc
--- /dev/null
+++ b/arch/alpha/lib/stacktrace.c
@@ -0,0 +1,103 @@
+#include <linux/kernel.h>
+#include <asm/system.h>
+
+typedef unsigned int instr;
+
+#define MAJOR_OP 0xfc000000
+#define LDA_OP 0x20000000
+#define STQ_OP 0xb4000000
+#define BR_OP 0xc0000000
+
+#define STK_ALLOC_1 0x23de8000 /* lda $30,-X($30) */
+#define STK_ALLOC_1M 0xffff8000
+#define STK_ALLOC_2 0x43c0153e /* subq $30,X,$30 */
+#define STK_ALLOC_2M 0xffe01fff
+
+#define MEM_REG 0x03e00000
+#define MEM_BASE 0x001f0000
+#define MEM_OFF 0x0000ffff
+#define MEM_OFF_SIGN 0x00008000
+#define BASE_SP 0x001e0000
+
+#define STK_ALLOC_MATCH(INSTR) \
+ (((INSTR) & STK_ALLOC_1M) == STK_ALLOC_1 \
+ || ((INSTR) & STK_ALLOC_2M) == STK_ALLOC_2)
+#define STK_PUSH_MATCH(INSTR) \
+ (((INSTR) & (MAJOR_OP | MEM_BASE | MEM_OFF_SIGN)) == (STQ_OP | BASE_SP))
+#define MEM_OP_OFFSET(INSTR) \
+ (((long)((INSTR) & MEM_OFF) << 48) >> 48)
+#define MEM_OP_REG(INSTR) \
+	(((INSTR) & MEM_REG) >> 21)
+
+/* Branches, jumps, PAL calls, and illegal opcodes end a basic block. */
+#define BB_END(INSTR) \
+ (((instr)(INSTR) >= BR_OP) | ((instr)(INSTR) < LDA_OP) | \
+ ((((instr)(INSTR) ^ 0x60000000) < 0x20000000) & \
+ (((instr)(INSTR) & 0x0c000000) != 0)))
+
+#define IS_KERNEL_TEXT(PC) ((unsigned long)(PC) > START_ADDR)
+
+static char reg_name[][4] = {
+ "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ", "t7 ",
+ "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "s6 ", "a0 ", "a1 ",
+ "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ", "t10", "t11", "ra ",
+ "pv ", "at ", "gp ", "sp ", "0"
+};
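+
+/*
+ * The trace is heuristic: starting from stacktrace() itself, each
+ * frame's prologue is decoded to recover the stack adjustment and the
+ * saved registers (notably ra, register 26, which gives the caller's
+ * pc); that pc is then scanned backwards for its enclosing prologue,
+ * and so on until the pc falls outside kernel text.
+ */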
+
+
+static instr *
+display_stored_regs(instr * pro_pc, unsigned char * sp)
+{
+ instr * ret_pc = 0;
+ int reg;
+ unsigned long value;
+
+ printk("Prologue [<%p>], Frame %p:\n", pro_pc, sp);
+	while (!BB_END(*pro_pc)) {
+		if (STK_PUSH_MATCH(*pro_pc)) {
+			reg = (*pro_pc & MEM_REG) >> 21;
+			value = *(unsigned long *)(sp + (*pro_pc & MEM_OFF));
+			if (reg == 26)
+				ret_pc = (instr *)value;
+			printk("\t\t%s / 0x%016lx\n", reg_name[reg], value);
+		}
+		++pro_pc;	/* advance, or the scan spins on one insn */
+	}
+ return ret_pc;
+}
+
+static instr *
+seek_prologue(instr * pc)
+{
+ while (!STK_ALLOC_MATCH(*pc))
+ --pc;
+ while (!BB_END(*(pc - 1)))
+ --pc;
+ return pc;
+}
+
+static long
+stack_increment(instr * prologue_pc)
+{
+ while (!STK_ALLOC_MATCH(*prologue_pc))
+ ++prologue_pc;
+
+ /* Count the bytes allocated. */
+	if ((*prologue_pc & STK_ALLOC_1M) == STK_ALLOC_1)
+ return -(((long)(*prologue_pc) << 48) >> 48);
+ else
+ return (*prologue_pc >> 13) & 0xff;
+}
+
+void
+stacktrace(void)
+{
+ instr * ret_pc;
+ instr * prologue = (instr *)stacktrace;
+ register unsigned char * sp __asm__ ("$30");
+
+ printk("\tstack trace:\n");
+ do {
+ ret_pc = display_stored_regs(prologue, sp);
+ sp += stack_increment(prologue);
+ prologue = seek_prologue(ret_pc);
+ } while (IS_KERNEL_TEXT(ret_pc));
+}
diff --git a/arch/alpha/lib/strcasecmp.c b/arch/alpha/lib/strcasecmp.c
new file mode 100644
index 000000000000..4e57a216feaf
--- /dev/null
+++ b/arch/alpha/lib/strcasecmp.c
@@ -0,0 +1,26 @@
+/*
+ * linux/arch/alpha/lib/strcasecmp.c
+ */
+
+#include <linux/string.h>
+
+
+/* We handle nothing here except the C locale. Since this is used in
+ only one place, on strings known to contain only 7 bit ASCII, this
+ is ok. */
+
+int strcasecmp(const char *a, const char *b)
+{
+ int ca, cb;
+
+ do {
+ ca = *a++ & 0xff;
+ cb = *b++ & 0xff;
+ if (ca >= 'A' && ca <= 'Z')
+ ca += 'a' - 'A';
+ if (cb >= 'A' && cb <= 'Z')
+ cb += 'a' - 'A';
+ } while (ca == cb && ca != '\0');
+
+ return ca - cb;
+}
diff --git a/arch/alpha/lib/strcat.S b/arch/alpha/lib/strcat.S
new file mode 100644
index 000000000000..393f50384878
--- /dev/null
+++ b/arch/alpha/lib/strcat.S
@@ -0,0 +1,52 @@
+/*
+ * arch/alpha/lib/strcat.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Append a null-terminated string from SRC to DST.
+ */
+
+ .text
+
+ .align 3
+ .globl strcat
+ .ent strcat
+strcat:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set up return value
+
+ /* Find the end of the string. */
+
+ ldq_u $1, 0($16) # load first quadword (a0 may be misaligned)
+ lda $2, -1
+ insqh $2, $16, $2
+ andnot $16, 7, $16
+ or $2, $1, $1
+ cmpbge $31, $1, $2 # bits set iff byte == 0
+ bne $2, $found
+
+$loop: ldq $1, 8($16)
+ addq $16, 8, $16
+ cmpbge $31, $1, $2
+ beq $2, $loop
+
+$found: negq $2, $3 # clear all but least set bit
+ and $2, $3, $2
+
+ and $2, 0xf0, $3 # binary search for that set bit
+ and $2, 0xcc, $4
+ and $2, 0xaa, $5
+ cmovne $3, 4, $3
+ cmovne $4, 2, $4
+ cmovne $5, 1, $5
+ addq $3, $4, $3
+ addq $16, $5, $16
+ addq $16, $3, $16
+
+ /* Now do the append. */
+
+ mov $26, $23
+ br __stxcpy
+
+ .end strcat
diff --git a/arch/alpha/lib/strchr.S b/arch/alpha/lib/strchr.S
new file mode 100644
index 000000000000..011a175e8329
--- /dev/null
+++ b/arch/alpha/lib/strchr.S
@@ -0,0 +1,70 @@
+/*
+ * arch/alpha/lib/strchr.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Return the address of a given character within a null-terminated
+ * string, or null if it is not found.
+ */
+
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+ .align 3
+ .globl strchr
+ .ent strchr
+strchr:
+ .frame sp, 0, ra
+ .prologue 0
+
+ zapnot a1, 1, a1 # e0 : zero extend the search character
+ ldq_u t0, 0(a0) # .. e1 : load first quadword
+ sll a1, 8, t5 # e0 : replicate the search character
+ andnot a0, 7, v0 # .. e1 : align our loop pointer
+ or t5, a1, a1 # e0 :
+ lda t4, -1 # .. e1 : build garbage mask
+ sll a1, 16, t5 # e0 :
+ cmpbge zero, t0, t2 # .. e1 : bits set iff byte == zero
+ mskqh t4, a0, t4 # e0 :
+ or t5, a1, a1 # .. e1 :
+ sll a1, 32, t5 # e0 :
+ cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
+ or t5, a1, a1 # e0 :
+ xor t0, a1, t1 # .. e1 : make bytes == c zero
+ cmpbge zero, t1, t3 # e0 : bits set iff byte == c
+ or t2, t3, t0 # e1 : bits set iff char match or zero match
+ andnot t0, t4, t0 # e0 : clear garbage bits
+ bne t0, $found # .. e1 (zdb)
+
+$loop: ldq t0, 8(v0) # e0 :
+ addq v0, 8, v0 # .. e1 :
+ nop # e0 :
+ xor t0, a1, t1 # .. e1 (ev5 data stall)
+ cmpbge zero, t0, t2 # e0 : bits set iff byte == 0
+ cmpbge zero, t1, t3 # .. e1 : bits set iff byte == c
+ or t2, t3, t0 # e0 :
+ beq t0, $loop # .. e1 (zdb)
+
+$found: negq t0, t1 # e0 : clear all but least set bit
+ and t0, t1, t0 # e1 (stall)
+
+ and t0, t3, t1 # e0 : bit set iff byte was the char
+ beq t1, $retnull # .. e1 (zdb)
+
+ and t0, 0xf0, t2 # e0 : binary search for that set bit
+ and t0, 0xcc, t3 # .. e1 :
+ and t0, 0xaa, t4 # e0 :
+ cmovne t2, 4, t2 # .. e1 :
+ cmovne t3, 2, t3 # e0 :
+ cmovne t4, 1, t4 # .. e1 :
+ addq t2, t3, t2 # e0 :
+ addq v0, t4, v0 # .. e1 :
+ addq v0, t2, v0 # e0 :
+ ret # .. e1 :
+
+$retnull:
+ mov zero, v0 # e0 :
+ ret # .. e1 :
+
+ .end strchr
diff --git a/arch/alpha/lib/strcpy.S b/arch/alpha/lib/strcpy.S
new file mode 100644
index 000000000000..e0728e4ad21f
--- /dev/null
+++ b/arch/alpha/lib/strcpy.S
@@ -0,0 +1,23 @@
+/*
+ * arch/alpha/lib/strcpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy a null-terminated string from SRC to DST. Return a pointer
+ * to the null-terminator in the source.
+ */
+
+ .text
+
+ .align 3
+ .globl strcpy
+ .ent strcpy
+strcpy:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set up return value
+ mov $26, $23 # set up return address
+ unop
+ br __stxcpy # do the copy
+
+ .end strcpy
diff --git a/arch/alpha/lib/strlen.S b/arch/alpha/lib/strlen.S
new file mode 100644
index 000000000000..fe63353de152
--- /dev/null
+++ b/arch/alpha/lib/strlen.S
@@ -0,0 +1,57 @@
+/*
+ * strlen.S (c) 1995 David Mosberger (davidm@cs.arizona.edu)
+ *
+ * Finds length of a 0-terminated string. Optimized for the
+ * Alpha architecture:
+ *
+ * - memory accessed as aligned quadwords only
+ *	- uses cmpbge to compare 8 bytes in parallel
+ * - does binary search to find 0 byte in last
+ * quadword (HAKMEM needed 12 instructions to
+ * do this instead of the 9 instructions that
+ * binary search needs).
+ */
+
+ .set noreorder
+ .set noat
+
+ .align 3
+
+ .globl strlen
+ .ent strlen
+
+strlen:
+ ldq_u $1, 0($16) # load first quadword ($16 may be misaligned)
+ lda $2, -1($31)
+ insqh $2, $16, $2
+ andnot $16, 7, $0
+ or $2, $1, $1
+ cmpbge $31, $1, $2 # $2 <- bitmask: bit i == 1 <==> i-th byte == 0
+ bne $2, found
+
+loop: ldq $1, 8($0)
+ addq $0, 8, $0 # addr += 8
+ nop # helps dual issue last two insns
+ cmpbge $31, $1, $2
+ beq $2, loop
+
+found: blbs $2, done # make aligned case fast
+ negq $2, $3
+ and $2, $3, $2
+
+ and $2, 0x0f, $1
+ addq $0, 4, $3
+ cmoveq $1, $3, $0
+
+ and $2, 0x33, $1
+ addq $0, 2, $3
+ cmoveq $1, $3, $0
+
+ and $2, 0x55, $1
+ addq $0, 1, $3
+ cmoveq $1, $3, $0
+
+done: subq $0, $16, $0
+ ret $31, ($26)
+
+ .end strlen
diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S
new file mode 100644
index 000000000000..508a18e96479
--- /dev/null
+++ b/arch/alpha/lib/strlen_user.S
@@ -0,0 +1,91 @@
+/*
+ * arch/alpha/lib/strlen_user.S
+ *
+ * Return the length of the string including the NUL terminator
+ * (strlen+1) or zero if an error occurred.
+ *
+ * In places where it is critical to limit the processing time,
+ * and the data is not trusted, strnlen_user() should be used.
+ * It will return a value greater than its second argument if
+ * that limit would be exceeded. This implementation is allowed
+ * to access memory beyond the limit, but will not cross a page
+ * boundary when doing so.
+ */
+
+#include <asm/regdef.h>
+
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EX(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda v0, $exception-99b(zero); \
+ .previous
+
+
+ .set noreorder
+ .set noat
+ .text
+
+ .globl __strlen_user
+ .ent __strlen_user
+ .frame sp, 0, ra
+
+ .align 3
+__strlen_user:
+ ldah a1, 32767(zero) # do not use plain strlen_user() for strings
+ # that might be almost 2 GB long; you should
+ # be using strnlen_user() instead
+
+ .globl __strnlen_user
+
+ .align 3
+__strnlen_user:
+ .prologue 0
+
+ EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned)
+ lda t1, -1(zero)
+ insqh t1, a0, t1
+ andnot a0, 7, v0
+ or t1, t0, t0
+ subq a0, 1, a0 # get our +1 for the return
+ cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0
+ subq a1, 7, t2
+ subq a0, v0, t0
+ bne t1, $found
+
+ addq t2, t0, t2
+ addq a1, 1, a1
+
+ .align 3
+$loop: ble t2, $limit
+ EX( ldq t0, 8(v0) )
+ subq t2, 8, t2
+ addq v0, 8, v0 # addr += 8
+ cmpbge zero, t0, t1
+ beq t1, $loop
+
+$found: negq t1, t2 # clear all but least set bit
+ and t1, t2, t1
+
+ and t1, 0xf0, t2 # binary search for that set bit
+ and t1, 0xcc, t3
+ and t1, 0xaa, t4
+ cmovne t2, 4, t2
+ cmovne t3, 2, t3
+ cmovne t4, 1, t4
+ addq t2, t3, t2
+ addq v0, t4, v0
+ addq v0, t2, v0
+ nop # dual issue next two on ev4 and ev5
+ subq v0, a0, v0
+$exception:
+ ret
+
+ .align 3 # currently redundant
+$limit:
+ subq a1, t2, v0
+ ret
+
+ .end __strlen_user
diff --git a/arch/alpha/lib/strncat.S b/arch/alpha/lib/strncat.S
new file mode 100644
index 000000000000..a8278163c972
--- /dev/null
+++ b/arch/alpha/lib/strncat.S
@@ -0,0 +1,84 @@
+/*
+ * arch/alpha/lib/strncat.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Append no more than COUNT characters from the null-terminated string SRC
+ * to the null-terminated string DST. Always null-terminate the new DST.
+ *
+ * This differs slightly from the semantics in libc in that we never write
+ * past count, whereas libc may write to count+1. This follows the generic
+ * implementation in lib/string.c and is, IMHO, more sensible.
+ */
+
+ .text
+
+ .align 3
+ .globl strncat
+ .ent strncat
+strncat:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set up return value
+ beq $18, $zerocount
+
+ /* Find the end of the string. */
+
+ ldq_u $1, 0($16) # load first quadword ($16 may be misaligned)
+ lda $2, -1($31)
+ insqh $2, $16, $2
+ andnot $16, 7, $16
+ or $2, $1, $1
+ cmpbge $31, $1, $2 # bits set iff byte == 0
+ bne $2, $found
+
+$loop: ldq $1, 8($16)
+ addq $16, 8, $16
+ cmpbge $31, $1, $2
+ beq $2, $loop
+
+$found: negq $2, $3 # clear all but least set bit
+ and $2, $3, $2
+
+ and $2, 0xf0, $3 # binary search for that set bit
+ and $2, 0xcc, $4
+ and $2, 0xaa, $5
+ cmovne $3, 4, $3
+ cmovne $4, 2, $4
+ cmovne $5, 1, $5
+ addq $3, $4, $3
+ addq $16, $5, $16
+ addq $16, $3, $16
+
+ /* Now do the append. */
+
+ bsr $23, __stxncpy
+
+ /* Worry about the null termination. */
+
+ zapnot $1, $27, $2 # was last byte a null?
+ bne $2, 0f
+ ret
+
+0: cmplt $27, $24, $2 # did we fill the buffer completely?
+ or $2, $18, $2
+ bne $2, 2f
+
+ and $24, 0x80, $2 # no zero next byte
+ bne $2, 1f
+
+ /* Here there are bytes left in the current word. Clear one. */
+ addq $24, $24, $24 # end-of-count bit <<= 1
+2: zap $1, $24, $1
+ stq_u $1, 0($16)
+ ret
+
+1: /* Here we must read the next DST word and clear the first byte. */
+ ldq_u $1, 8($16)
+ zap $1, 1, $1
+ stq_u $1, 8($16)
+
+$zerocount:
+ ret
+
+ .end strncat
diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S
new file mode 100644
index 000000000000..338551c7113c
--- /dev/null
+++ b/arch/alpha/lib/strncpy.S
@@ -0,0 +1,81 @@
+/*
+ * arch/alpha/lib/strncpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy no more than COUNT bytes of the null-terminated string from
+ * SRC to DST. If SRC does not cover all of COUNT, the balance is
+ * zeroed.
+ *
+ * Or, rather, if the kernel cared about that weird ANSI quirk. This
+ * version has cropped that bit o' nastiness as well as assuming that
+ * __stxncpy is in range of a branch.
+ */
+
+ .set noat
+ .set noreorder
+
+ .text
+
+ .align 4
+ .globl strncpy
+ .ent strncpy
+strncpy:
+ .frame $30, 0, $26
+ .prologue 0
+
+ mov $16, $0 # set return value now
+ beq $18, $zerolen
+ unop
+ bsr $23, __stxncpy # do the work of the copy
+
+ unop
+ bne $18, $multiword # do we have full words left?
+ subq $24, 1, $3 # nope
+ subq $27, 1, $4
+
+ or $3, $24, $3 # clear the bits between the last
+ or $4, $27, $4 # written byte and the last byte in COUNT
+ andnot $4, $3, $4
+ zap $1, $4, $1
+
+ stq_u $1, 0($16)
+ ret
+
+ .align 4
+$multiword:
+ subq $24, 1, $2 # clear the final bits in the prev word
+ or $2, $24, $2
+ zapnot $1, $2, $1
+ subq $18, 1, $18
+
+ stq_u $1, 0($16)
+ addq $16, 8, $16
+ unop
+ beq $18, 1f
+
+ nop
+ unop
+ nop
+ blbc $18, 0f
+
+ stq_u $31, 0($16) # zero one word
+ subq $18, 1, $18
+ addq $16, 8, $16
+ beq $18, 1f
+
+0: stq_u $31, 0($16) # zero two words
+ subq $18, 2, $18
+ stq_u $31, 8($16)
+ addq $16, 16, $16
+ bne $18, 0b
+
+1: ldq_u $1, 0($16) # clear the leading bits in the final word
+ subq $27, 1, $2
+ or $2, $27, $2
+
+ zap $1, $2, $1
+ stq_u $1, 0($16)
+$zerolen:
+ ret
+
+ .end strncpy
diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S
new file mode 100644
index 000000000000..73ee21160ff7
--- /dev/null
+++ b/arch/alpha/lib/strncpy_from_user.S
@@ -0,0 +1,339 @@
+/*
+ * arch/alpha/lib/strncpy_from_user.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Just like strncpy except in the return value:
+ *
+ * -EFAULT if an exception occurs before the terminator is copied.
+ * N if the buffer filled.
+ *
+ * Otherwise the length of the string is returned.
+ */
+
+
+#include <asm/errno.h>
+#include <asm/regdef.h>
+
+
+/* Allow an exception for an insn; exit if we get one. */
+#define EX(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .long 99b - .; \
+ lda $31, $exception-99b($0); \
+ .previous
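+
+/*
+ * Per the alpha __ex_table fixup convention (see asm/uaccess.h), the
+ * "lda $31, $exception-99b($0)" above is a fixup descriptor, not
+ * executed code: on a fault, $0 receives -EFAULT and control resumes
+ * at $exception, giving the error return documented above.
+ */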
+
+
+ .set noat
+ .set noreorder
+ .text
+
+ .globl __strncpy_from_user
+ .ent __strncpy_from_user
+ .frame $30, 0, $26
+ .prologue 0
+
+ .align 3
+$aligned:
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == the first source word. */
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ lda t2, -1 # e1 : build a mask against false zero
+ mskqh t2, a1, t2 # e0 : detection in the src word
+ mskqh t1, a1, t3 # e0 :
+ ornot t1, t2, t2 # .. e1 :
+ mskql t0, a1, t0 # e0 : assemble the first output word
+ cmpbge zero, t2, t8 # .. e1 : bits set iff null found
+ or t0, t3, t0 # e0 :
+ beq a2, $a_eoc # .. e1 :
+ bne t8, $a_eos # .. e1 :
+
+ /* On entry to this basic block:
+ t0 == a source word not containing a null. */
+
+$a_loop:
+ stq_u t0, 0(a0) # e0 :
+ addq a0, 8, a0 # .. e1 :
+ EX( ldq_u t0, 0(a1) ) # e0 :
+ addq a1, 8, a1 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ cmpbge zero, t0, t8 # .. e1 (stall)
+ beq a2, $a_eoc # e1 :
+ beq t8, $a_loop # e1 :
+
+ /* Take care of the final (partial) word store. At this point
+ the end-of-count bit is set in t8 iff it applies.
+
+ On entry to this basic block we have:
+ t0 == the source word containing the null
+ t8 == the cmpbge mask that found it. */
+
+$a_eos:
+ negq t8, t12 # e0 : find low bit set
+ and t8, t12, t12 # e1 (stall)
+
+ /* For the sake of the cache, don't read a destination word
+ if we're not going to need it. */
+ and t12, 0x80, t6 # e0 :
+ bne t6, 1f # .. e1 (zdb)
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t1, 0(a0) # e0 :
+ subq t12, 1, t6 # .. e1 :
+ or t12, t6, t8 # e0 :
+ unop #
+ zapnot t0, t8, t0 # e0 : clear src bytes > null
+ zap t1, t8, t1 # .. e1 : clear dst bytes <= null
+ or t0, t1, t0 # e1 :
+
+1: stq_u t0, 0(a0)
+ br $finish_up
+
+ /* Add the end-of-count bit to the eos detection bitmask. */
+$a_eoc:
+ or t10, t8, t8
+ br $a_eos
+
+ /*** The Function Entry Point ***/
+ .align 3
+__strncpy_from_user:
+ mov a0, v0 # save the string start
+ beq a2, $zerolength
+
+ /* Are source and destination co-aligned? */
+ xor a0, a1, t1 # e0 :
+ and a0, 7, t0 # .. e1 : find dest misalignment
+ and t1, 7, t1 # e0 :
+ addq a2, t0, a2 # .. e1 : bias count by dest misalignment
+ subq a2, 1, a2 # e0 :
+ and a2, 7, t2 # e1 :
+ srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8
+ addq zero, 1, t10 # .. e1 :
+ sll t10, t2, t10 # e0 : t10 = bitmask of last count byte
+ bne t1, $unaligned # .. e1 :
+
+ /* We are co-aligned; take care of a partial first word. */
+
+ EX( ldq_u t1, 0(a1) ) # e0 : load first src word
+ addq a1, 8, a1 # .. e1 :
+
+ beq t0, $aligned # avoid loading dest word if not needed
+ ldq_u t0, 0(a0) # e0 :
+ br $aligned # .. e1 :
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 3
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, unmasked
+ t1 == the shifted low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ EX( ldq_u t2, 8(a1) ) # e0 : load second src word
+ addq a1, 8, a1 # .. e1 :
+ mskql t0, a0, t0 # e0 : mask trailing garbage in dst
+ extqh t2, a1, t4 # e0 :
+ or t1, t4, t1 # e1 : first aligned src word complete
+ mskqh t1, a0, t1 # e0 : mask leading garbage in src
+ or t0, t1, t0 # e0 : first output word complete
+ or t0, t6, t6 # e1 : mask original data for zero test
+ cmpbge zero, t6, t8 # e0 :
+ beq a2, $u_eocfin # .. e1 :
+ bne t8, $u_final # e1 :
+
+ lda t6, -1 # e1 : mask out the bits we have
+ mskql t6, a1, t6 # e0 : already seen
+ stq_u t0, 0(a0) # e0 : store first output word
+ or t6, t2, t2 # .. e1 :
+ cmpbge zero, t2, t8 # e0 : find nulls in second partial
+ addq a0, 8, a0 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ bne t8, $u_late_head_exit # .. e1 :
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+
+ extql t2, a1, t1 # e0 : position hi-bits of lo word
+ EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word
+ addq a1, 8, a1 # e0 :
+ cmpbge zero, t2, t8 # e1 (stall)
+ beq a2, $u_eoc # e1 :
+ bne t8, $u_eos # e1 :
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ .align 3
+$u_loop:
+ extqh t2, a1, t0 # e0 : extract high bits for current word
+ addq a1, 8, a1 # .. e1 :
+ extql t2, a1, t3 # e0 : extract low bits for next time
+ addq a0, 8, a0 # .. e1 :
+ or t0, t1, t0 # e0 : current dst word now complete
+ EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time
+ stq_u t0, -8(a0) # e0 : save the current word
+ mov t3, t1 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ cmpbge zero, t2, t8 # .. e1 : test new word for eos
+ beq a2, $u_eoc # e1 :
+ beq t8, $u_loop # e1 :
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+$u_eos:
+ extqh t2, a1, t0 # e0 :
+ or t0, t1, t0 # e1 : first (partial) source word complete
+
+ cmpbge zero, t0, t8 # e0 : is the null in this first bit?
+ bne t8, $u_final # .. e1 (zdb)
+
+ stq_u t0, 0(a0) # e0 : the null was in the high-order bits
+ addq a0, 8, a0 # .. e1 :
+ subq a2, 1, a2 # e1 :
+
+$u_late_head_exit:
+ extql t2, a1, t0 # .. e0 :
+ cmpbge zero, t0, t8 # e0 :
+ or t8, t10, t6 # e1 :
+ cmoveq a2, t6, t8 # e0 :
+ nop # .. e1 :
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t0 == assembled source word
+ t8 == cmpbge mask that found the null. */
+$u_final:
+ negq t8, t6 # e0 : isolate low bit set
+ and t6, t8, t12 # e1 :
+
+ and t12, 0x80, t6 # e0 : avoid dest word load if we can
+ bne t6, 1f # .. e1 (zdb)
+
+ ldq_u t1, 0(a0) # e0 :
+ subq t12, 1, t6 # .. e1 :
+ or t6, t12, t8 # e0 :
+ zapnot t0, t8, t0 # .. e1 : kill source bytes > null
+ zap t1, t8, t1 # e0 : kill dest bytes <= null
+ or t0, t1, t0 # e1 :
+
+1: stq_u t0, 0(a0) # e0 :
+ br $finish_up
+
+$u_eoc: # end-of-count
+ extqh t2, a1, t0
+ or t0, t1, t0
+ cmpbge zero, t0, t8
+
+$u_eocfin: # end-of-count, final word
+ or t10, t8, t8
+ br $u_final
+
+ /* Unaligned copy entry point. */
+ .align 3
+$unaligned:
+
+ EX( ldq_u t1, 0(a1) ) # e0 : load first source word
+
+ and a0, 7, t4 # .. e1 : find dest misalignment
+ and a1, 7, t5 # e0 : find src misalignment
+
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+
+ mov zero, t0 # .. e1 :
+ mov zero, t6 # e0 :
+ beq t4, 1f # .. e1 :
+ ldq_u t0, 0(a0) # e0 :
+ lda t6, -1 # .. e1 :
+ mskql t6, a0, t6 # e0 :
+1:
+ subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
+
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+
+ cmplt t4, t5, t12 # e1 :
+ extql t1, a1, t1 # .. e0 : shift src into place
+ lda t2, -1 # e0 : for creating masks later
+ beq t12, $u_head # e1 :
+
+ mskqh t2, t5, t2 # e0 : begin src byte validity mask
+ cmpbge zero, t1, t8 # .. e1 : is there a zero?
+ extql t2, a1, t2 # e0 :
+ or t8, t10, t5 # .. e1 : test for end-of-count too
+ cmpbge zero, t2, t3 # e0 :
+ cmoveq a2, t5, t8 # .. e1 :
+ andnot t8, t3, t8 # e0 :
+ beq t8, $u_head # .. e1 (zdb)
+
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+
+ ldq_u t0, 0(a0) # e0 :
+ negq t8, t6 # .. e1 : build bitmask of bytes <= zero
+ mskqh t1, t4, t1 # e0 :
+ and t6, t8, t12 # .. e1 :
+ subq t12, 1, t6 # e0 :
+ or t6, t12, t8 # e1 :
+
+ zapnot t2, t8, t2 # e0 : prepare source word; mirror changes
+ zapnot t1, t8, t1 # .. e1 : to source validity mask
+
+ andnot t0, t2, t0 # e0 : zero place for source to reside
+ or t0, t1, t0 # e1 : and put it there
+ stq_u t0, 0(a0) # e0 :
+
+$finish_up:
+ zapnot t0, t12, t4 # was last byte written null?
+ cmovne t4, 1, t4
+
+ and t12, 0xf0, t3 # binary search for the address of the
+ and t12, 0xcc, t2 # last byte written
+ and t12, 0xaa, t1
+ bic a0, 7, t0
+ cmovne t3, 4, t3
+ cmovne t2, 2, t2
+ cmovne t1, 1, t1
+ addq t0, t3, t0
+ addq t1, t2, t1
+ addq t0, t1, t0
+ addq t0, t4, t0 # add one if we filled the buffer
+
+ subq t0, v0, v0 # find string length
+ ret
+
+$zerolength:
+ clr v0
+$exception:
+ ret
+
+ .end __strncpy_from_user
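The $finish_up sequence above locates the last byte written without a loop: it binary-searches the one-hot byte mask in t12 with three constant tests. A minimal C sketch of the same computation (the helper name is illustrative, not kernel code):

    static inline unsigned long last_byte_index(unsigned long mask)
    {
            /* mask has exactly one of bits 0..7 set, one per byte lane */
            unsigned long idx = 0;
            if (mask & 0xf0) idx += 4;      /* set bit is in the high nibble */
            if (mask & 0xcc) idx += 2;      /* set bit is in an odd bit-pair */
            if (mask & 0xaa) idx += 1;      /* set bit is at an odd position */
            return idx;
    }

Adding idx to the word-aligned store address (bic a0, 7, t0 above) yields the address of the last byte written, from which the string length follows.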
diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S
new file mode 100644
index 000000000000..82cfd0ac907b
--- /dev/null
+++ b/arch/alpha/lib/strrchr.S
@@ -0,0 +1,87 @@
+/*
+ * arch/alpha/lib/strrchr.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Return the address of the last occurrence of a given character
+ * within a null-terminated string, or null if it is not found.
+ */
+
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+ .align 3
+ .ent strrchr
+ .globl strrchr
+strrchr:
+ .frame sp, 0, ra
+ .prologue 0
+
+ zapnot a1, 1, a1 # e0 : zero extend our test character
+ mov zero, t6 # .. e1 : t6 is last match aligned addr
+ sll a1, 8, t5 # e0 : replicate our test character
+ mov zero, t8 # .. e1 : t8 is last match byte compare mask
+ or t5, a1, a1 # e0 :
+ ldq_u t0, 0(a0) # .. e1 : load first quadword
+ sll a1, 16, t5 # e0 :
+ andnot a0, 7, v0 # .. e1 : align source addr
+ or t5, a1, a1 # e0 :
+ lda t4, -1 # .. e1 : build garbage mask
+ sll a1, 32, t5 # e0 :
+ cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero
+ mskqh t4, a0, t4 # e0 :
+ or t5, a1, a1 # .. e1 : character replication complete
+ xor t0, a1, t2 # e0 : make bytes == c zero
+ cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage
+ cmpbge zero, t2, t3 # e0 : bits set iff byte == c
+ andnot t1, t4, t1 # .. e1 : clear garbage from null test
+ andnot t3, t4, t3 # e0 : clear garbage from char test
+ bne t1, $eos # .. e1 : did we already hit the terminator?
+
+ /* Character search main loop */
+$loop:
+ ldq t0, 8(v0) # e0 : load next quadword
+	cmovne	t3, v0, t6	# .. e1 : save previous comparison's match
+ cmovne t3, t3, t8 # e0 :
+ addq v0, 8, v0 # .. e1 :
+ xor t0, a1, t2 # e0 :
+ cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero
+ cmpbge zero, t2, t3 # e0 : bits set iff byte == c
+	beq	t1, $loop	# .. e1 : if we haven't seen a null, loop
+
+ /* Mask out character matches after terminator */
+$eos:
+ negq t1, t4 # e0 : isolate first null byte match
+ and t1, t4, t4 # e1 :
+	subq	t4, 1, t5	# e0 : build a mask of the bytes up to...
+ or t4, t5, t4 # e1 : ... and including the null
+
+ and t3, t4, t3 # e0 : mask out char matches after null
+ cmovne t3, t3, t8 # .. e1 : save it, if match found
+ cmovne t3, v0, t6 # e0 :
+
+ /* Locate the address of the last matched character */
+
+ /* Retain the early exit for the ev4 -- the ev5 mispredict penalty
+ is 5 cycles -- the same as just falling through. */
+ beq t8, $retnull # .. e1 :
+
+ and t8, 0xf0, t2 # e0 : binary search for the high bit set
+ cmovne t2, t2, t8 # .. e1 (zdb)
+ cmovne t2, 4, t2 # e0 :
+ and t8, 0xcc, t1 # .. e1 :
+ cmovne t1, t1, t8 # e0 :
+ cmovne t1, 2, t1 # .. e1 :
+ and t8, 0xaa, t0 # e0 :
+ cmovne t0, 1, t0 # .. e1 (zdb)
+ addq t2, t1, t1 # e0 :
+ addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix
+ addq v0, t1, v0 # e0 :
+ ret # .. e1 :
+
+$retnull:
+ mov zero, v0 # e0 :
+ ret # .. e1 :
+
+ .end strrchr
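strrchr's prologue replicates the test character across all eight byte lanes with three shift/or pairs; xor then zeroes exactly the bytes equal to the character, and cmpbge against zero flags those bytes. The replication step in C (a sketch assuming a 64-bit unsigned long, as on Alpha; the helper name is illustrative):

    static inline unsigned long repl_byte(unsigned char c)
    {
            unsigned long v = c;
            v |= v << 8;            /* c in 2 byte lanes     */
            v |= v << 16;           /* c in 4 byte lanes     */
            v |= v << 32;           /* c in all 8 byte lanes */
            return v;
    }

With r = repl_byte(c), byte i of x ^ r is zero exactly when byte i of x equals c, so a single cmpbge finds every match in a quadword at once.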
diff --git a/arch/alpha/lib/stxcpy.S b/arch/alpha/lib/stxcpy.S
new file mode 100644
index 000000000000..2a8d51bfc05d
--- /dev/null
+++ b/arch/alpha/lib/stxcpy.S
@@ -0,0 +1,289 @@
+/*
+ * arch/alpha/lib/stxcpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy a null-terminated string from SRC to DST.
+ *
+ * This is an internal routine used by strcpy, stpcpy, and strcat.
+ * As such, it uses special linkage conventions to make implementation
+ * of these public functions more efficient.
+ *
+ * On input:
+ * t9 = return address
+ * a0 = DST
+ * a1 = SRC
+ *
+ * On output:
+ * t12 = bitmask (with one bit set) indicating the last byte written
+ * a0 = unaligned address of the last *word* written
+ *
+ * Furthermore, v0, a3-a5, t11, and t12 are untouched.
+ */
+
+#include <asm/regdef.h>
+
+ .set noat
+ .set noreorder
+
+ .text
+
+/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
+ doesn't like putting the entry point for a procedure somewhere in the
+ middle of the procedure descriptor. Work around this by putting the
+   aligned copy in its own procedure descriptor.  */
+
+ .ent stxcpy_aligned
+ .align 3
+stxcpy_aligned:
+ .frame sp, 0, t9
+ .prologue 0
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == the first source word. */
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ lda t2, -1 # e1 : build a mask against false zero
+ mskqh t2, a1, t2 # e0 : detection in the src word
+ mskqh t1, a1, t3 # e0 :
+ ornot t1, t2, t2 # .. e1 :
+ mskql t0, a1, t0 # e0 : assemble the first output word
+ cmpbge zero, t2, t8 # .. e1 : bits set iff null found
+ or t0, t3, t1 # e0 :
+ bne t8, $a_eos # .. e1 :
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == a source word not containing a null. */
+
+$a_loop:
+ stq_u t1, 0(a0) # e0 :
+ addq a0, 8, a0 # .. e1 :
+ ldq_u t1, 0(a1) # e0 :
+ addq a1, 8, a1 # .. e1 :
+ cmpbge zero, t1, t8 # e0 (stall)
+ beq t8, $a_loop # .. e1 (zdb)
+
+ /* Take care of the final (partial) word store.
+ On entry to this basic block we have:
+ t1 == the source word containing the null
+ t8 == the cmpbge mask that found it. */
+$a_eos:
+ negq t8, t6 # e0 : find low bit set
+ and t8, t6, t12 # e1 (stall)
+
+ /* For the sake of the cache, don't read a destination word
+ if we're not going to need it. */
+ and t12, 0x80, t6 # e0 :
+ bne t6, 1f # .. e1 (zdb)
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t0, 0(a0) # e0 :
+ subq t12, 1, t6 # .. e1 :
+ zapnot t1, t6, t1 # e0 : clear src bytes >= null
+ or t12, t6, t8 # .. e1 :
+ zap t0, t8, t0 # e0 : clear dst bytes <= null
+ or t0, t1, t1 # e1 :
+
+1: stq_u t1, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+ .end stxcpy_aligned
+
+ .align 3
+ .ent __stxcpy
+ .globl __stxcpy
+__stxcpy:
+ .frame sp, 0, t9
+ .prologue 0
+
+ /* Are source and destination co-aligned? */
+ xor a0, a1, t0 # e0 :
+ unop # :
+ and t0, 7, t0 # e0 :
+ bne t0, $unaligned # .. e1 :
+
+ /* We are co-aligned; take care of a partial first word. */
+ ldq_u t1, 0(a1) # e0 : load first src word
+ and a0, 7, t0 # .. e1 : take care not to load a word ...
+ addq a1, 8, a1 # e0 :
+	beq	t0, stxcpy_aligned	# .. e1 : ... if we won't need it
+ ldq_u t0, 0(a0) # e0 :
+ br stxcpy_aligned # .. e1 :
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 3
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, for masking back in, if needed else 0
+ t1 == the low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ ldq_u t2, 8(a1) # e0 :
+ addq a1, 8, a1 # .. e1 :
+
+ extql t1, a1, t1 # e0 :
+ extqh t2, a1, t4 # e0 :
+ mskql t0, a0, t0 # e0 :
+ or t1, t4, t1 # .. e1 :
+ mskqh t1, a0, t1 # e0 :
+ or t0, t1, t1 # e1 :
+
+ or t1, t6, t6 # e0 :
+ cmpbge zero, t6, t8 # .. e1 :
+ lda t6, -1 # e0 : for masking just below
+ bne t8, $u_final # .. e1 :
+
+ mskql t6, a1, t6 # e0 : mask out the bits we have
+ or t6, t2, t2 # e1 : already extracted before
+ cmpbge zero, t2, t8 # e0 : testing eos
+ bne t8, $u_late_head_exit # .. e1 (zdb)
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+
+ stq_u t1, 0(a0) # e0 : store first output word
+ addq a0, 8, a0 # .. e1 :
+	extql	t2, a1, t0	# e0 : position hi-bits of lo word
+ ldq_u t2, 8(a1) # .. e1 : read next high-order source word
+ addq a1, 8, a1 # e0 :
+ cmpbge zero, t2, t8 # .. e1 :
+ nop # e0 :
+ bne t8, $u_eos # .. e1 :
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t0 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ .align 3
+$u_loop:
+ extqh t2, a1, t1 # e0 : extract high bits for current word
+ addq a1, 8, a1 # .. e1 :
+ extql t2, a1, t3 # e0 : extract low bits for next time
+ addq a0, 8, a0 # .. e1 :
+ or t0, t1, t1 # e0 : current dst word now complete
+ ldq_u t2, 0(a1) # .. e1 : load high word for next time
+ stq_u t1, -8(a0) # e0 : save the current word
+ mov t3, t0 # .. e1 :
+ cmpbge zero, t2, t8 # e0 : test new word for eos
+ beq t8, $u_loop # .. e1 :
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t0 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+$u_eos:
+ extqh t2, a1, t1 # e0 :
+ or t0, t1, t1 # e1 : first (partial) source word complete
+
+ cmpbge zero, t1, t8 # e0 : is the null in this first bit?
+ bne t8, $u_final # .. e1 (zdb)
+
+$u_late_head_exit:
+ stq_u t1, 0(a0) # e0 : the null was in the high-order bits
+ addq a0, 8, a0 # .. e1 :
+ extql t2, a1, t1 # e0 :
+ cmpbge zero, t1, t8 # .. e1 :
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t1 == assembled source word
+ t8 == cmpbge mask that found the null. */
+$u_final:
+ negq t8, t6 # e0 : isolate low bit set
+ and t6, t8, t12 # e1 :
+
+ and t12, 0x80, t6 # e0 : avoid dest word load if we can
+ bne t6, 1f # .. e1 (zdb)
+
+ ldq_u t0, 0(a0) # e0 :
+ subq t12, 1, t6 # .. e1 :
+ or t6, t12, t8 # e0 :
+ zapnot t1, t6, t1 # .. e1 : kill source bytes >= null
+ zap t0, t8, t0 # e0 : kill dest bytes <= null
+ or t0, t1, t1 # e1 :
+
+1: stq_u t1, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+ /* Unaligned copy entry point. */
+ .align 3
+$unaligned:
+
+ ldq_u t1, 0(a1) # e0 : load first source word
+
+ and a0, 7, t4 # .. e1 : find dest misalignment
+ and a1, 7, t5 # e0 : find src misalignment
+
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+
+ mov zero, t0 # .. e1 :
+ mov zero, t6 # e0 :
+ beq t4, 1f # .. e1 :
+ ldq_u t0, 0(a0) # e0 :
+ lda t6, -1 # .. e1 :
+ mskql t6, a0, t6 # e0 :
+1:
+ subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
+
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+
+ cmplt t4, t5, t12 # e0 :
+ beq t12, $u_head # .. e1 (zdb)
+
+ lda t2, -1 # e1 : mask out leading garbage in source
+ mskqh t2, t5, t2 # e0 :
+ nop # e0 :
+ ornot t1, t2, t3 # .. e1 :
+ cmpbge zero, t3, t8 # e0 : is there a zero?
+ beq t8, $u_head # .. e1 (zdb)
+
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+
+ ldq_u t0, 0(a0) # e0 :
+
+ negq t8, t6 # .. e1 : build bitmask of bytes <= zero
+ and t6, t8, t12 # e0 :
+ and a1, 7, t5 # .. e1 :
+ subq t12, 1, t6 # e0 :
+ or t6, t12, t8 # e1 :
+ srl t12, t5, t12 # e0 : adjust final null return value
+
+ zapnot t2, t8, t2 # .. e1 : prepare source word; mirror changes
+ and t1, t2, t1 # e1 : to source validity mask
+ extql t2, a1, t2 # .. e0 :
+ extql t1, a1, t1 # e0 :
+
+ andnot t0, t2, t0 # .. e1 : zero place for source to reside
+ or t0, t1, t1 # e1 : and put it there
+ stq_u t1, 0(a0) # .. e0 :
+ ret (t9) # e1 :
+
+ .end __stxcpy
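In the unaligned loop above, each output word is assembled from two adjacent source words: extql supplies the high bytes of the previous word, extqh the low bytes of the current one. The merge in C (a sketch assuming little-endian byte order and a nonzero source shift s = a1 & 7, which the $unaligned path guarantees):

    static inline unsigned long
    merge(unsigned long prev, unsigned long cur, unsigned int s /* 1..7 */)
    {
            /* bytes s..7 of prev land in the low lanes (extql),
               bytes 0..s-1 of cur land in the high lanes (extqh) */
            return (prev >> (8 * s)) | (cur << (8 * (8 - s)));
    }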
diff --git a/arch/alpha/lib/stxncpy.S b/arch/alpha/lib/stxncpy.S
new file mode 100644
index 000000000000..da1a72740d29
--- /dev/null
+++ b/arch/alpha/lib/stxncpy.S
@@ -0,0 +1,345 @@
+/*
+ * arch/alpha/lib/stxncpy.S
+ * Contributed by Richard Henderson (rth@tamu.edu)
+ *
+ * Copy no more than COUNT bytes of the null-terminated string from
+ * SRC to DST.
+ *
+ * This is an internal routine used by strncpy, stpncpy, and strncat.
+ * As such, it uses special linkage conventions to make implementation
+ * of these public functions more efficient.
+ *
+ * On input:
+ * t9 = return address
+ * a0 = DST
+ * a1 = SRC
+ * a2 = COUNT
+ *
+ * Furthermore, COUNT may not be zero.
+ *
+ * On output:
+ * t0 = last word written
+ * t10 = bitmask (with one bit set) indicating the byte position of
+ * the end of the range specified by COUNT
+ * t12 = bitmask (with one bit set) indicating the last byte written
+ * a0 = unaligned address of the last *word* written
+ * a2 = the number of full words left in COUNT
+ *
+ * Furthermore, v0, a3-a5, t11, and $at are untouched.
+ */
+
+#include <asm/regdef.h>
+
+ .set noat
+ .set noreorder
+
+ .text
+
+/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
+ doesn't like putting the entry point for a procedure somewhere in the
+ middle of the procedure descriptor. Work around this by putting the
+   aligned copy in its own procedure descriptor.  */
+
+ .ent stxncpy_aligned
+ .align 3
+stxncpy_aligned:
+ .frame sp, 0, t9, 0
+ .prologue 0
+
+ /* On entry to this basic block:
+ t0 == the first destination word for masking back in
+ t1 == the first source word. */
+
+ /* Create the 1st output word and detect 0's in the 1st input word. */
+ lda t2, -1 # e1 : build a mask against false zero
+ mskqh t2, a1, t2 # e0 : detection in the src word
+ mskqh t1, a1, t3 # e0 :
+ ornot t1, t2, t2 # .. e1 :
+ mskql t0, a1, t0 # e0 : assemble the first output word
+ cmpbge zero, t2, t8 # .. e1 : bits set iff null found
+ or t0, t3, t0 # e0 :
+ beq a2, $a_eoc # .. e1 :
+ bne t8, $a_eos # .. e1 :
+
+ /* On entry to this basic block:
+ t0 == a source word not containing a null. */
+
+$a_loop:
+ stq_u t0, 0(a0) # e0 :
+ addq a0, 8, a0 # .. e1 :
+ ldq_u t0, 0(a1) # e0 :
+ addq a1, 8, a1 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ cmpbge zero, t0, t8 # .. e1 (stall)
+ beq a2, $a_eoc # e1 :
+ beq t8, $a_loop # e1 :
+
+ /* Take care of the final (partial) word store. At this point
+ the end-of-count bit is set in t8 iff it applies.
+
+ On entry to this basic block we have:
+ t0 == the source word containing the null
+ t8 == the cmpbge mask that found it. */
+
+$a_eos:
+ negq t8, t12 # e0 : find low bit set
+ and t8, t12, t12 # e1 (stall)
+
+ /* For the sake of the cache, don't read a destination word
+ if we're not going to need it. */
+ and t12, 0x80, t6 # e0 :
+ bne t6, 1f # .. e1 (zdb)
+
+ /* We're doing a partial word store and so need to combine
+ our source and original destination words. */
+ ldq_u t1, 0(a0) # e0 :
+ subq t12, 1, t6 # .. e1 :
+ or t12, t6, t8 # e0 :
+ unop #
+ zapnot t0, t8, t0 # e0 : clear src bytes > null
+ zap t1, t8, t1 # .. e1 : clear dst bytes <= null
+ or t0, t1, t0 # e1 :
+
+1: stq_u t0, 0(a0) # e0 :
+ ret (t9) # e1 :
+
+ /* Add the end-of-count bit to the eos detection bitmask. */
+$a_eoc:
+ or t10, t8, t8
+ br $a_eos
+
+ .end stxncpy_aligned
+
+ .align 3
+ .ent __stxncpy
+ .globl __stxncpy
+__stxncpy:
+ .frame sp, 0, t9, 0
+ .prologue 0
+
+ /* Are source and destination co-aligned? */
+ xor a0, a1, t1 # e0 :
+ and a0, 7, t0 # .. e1 : find dest misalignment
+ and t1, 7, t1 # e0 :
+ addq a2, t0, a2 # .. e1 : bias count by dest misalignment
+ subq a2, 1, a2 # e0 :
+ and a2, 7, t2 # e1 :
+ srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8
+ addq zero, 1, t10 # .. e1 :
+ sll t10, t2, t10 # e0 : t10 = bitmask of last count byte
+ bne t1, $unaligned # .. e1 :
+
+ /* We are co-aligned; take care of a partial first word. */
+
+ ldq_u t1, 0(a1) # e0 : load first src word
+ addq a1, 8, a1 # .. e1 :
+
+ beq t0, stxncpy_aligned # avoid loading dest word if not needed
+ ldq_u t0, 0(a0) # e0 :
+ br stxncpy_aligned # .. e1 :
+
+
+/* The source and destination are not co-aligned. Align the destination
+ and cope. We have to be very careful about not reading too much and
+ causing a SEGV. */
+
+ .align 3
+$u_head:
+ /* We know just enough now to be able to assemble the first
+ full source word. We can still find a zero at the end of it
+ that prevents us from outputting the whole thing.
+
+ On entry to this basic block:
+ t0 == the first dest word, unmasked
+ t1 == the shifted low bits of the first source word
+ t6 == bytemask that is -1 in dest word bytes */
+
+ ldq_u t2, 8(a1) # e0 : load second src word
+ addq a1, 8, a1 # .. e1 :
+ mskql t0, a0, t0 # e0 : mask trailing garbage in dst
+ extqh t2, a1, t4 # e0 :
+ or t1, t4, t1 # e1 : first aligned src word complete
+ mskqh t1, a0, t1 # e0 : mask leading garbage in src
+ or t0, t1, t0 # e0 : first output word complete
+ or t0, t6, t6 # e1 : mask original data for zero test
+ cmpbge zero, t6, t8 # e0 :
+ beq a2, $u_eocfin # .. e1 :
+ lda t6, -1 # e0 :
+ bne t8, $u_final # .. e1 :
+
+ mskql t6, a1, t6 # e0 : mask out bits already seen
+ nop # .. e1 :
+ stq_u t0, 0(a0) # e0 : store first output word
+ or t6, t2, t2 # .. e1 :
+ cmpbge zero, t2, t8 # e0 : find nulls in second partial
+ addq a0, 8, a0 # .. e1 :
+ subq a2, 1, a2 # e0 :
+ bne t8, $u_late_head_exit # .. e1 :
+
+ /* Finally, we've got all the stupid leading edge cases taken care
+ of and we can set up to enter the main loop. */
+
+ extql t2, a1, t1 # e0 : position hi-bits of lo word
+ beq a2, $u_eoc # .. e1 :
+ ldq_u t2, 8(a1) # e0 : read next high-order source word
+ addq a1, 8, a1 # .. e1 :
+ extqh t2, a1, t0 # e0 : position lo-bits of hi word (stall)
+ cmpbge zero, t2, t8 # .. e1 :
+ nop # e0 :
+ bne t8, $u_eos # .. e1 :
+
+ /* Unaligned copy main loop. In order to avoid reading too much,
+ the loop is structured to detect zeros in aligned source words.
+ This has, unfortunately, effectively pulled half of a loop
+ iteration out into the head and half into the tail, but it does
+ prevent nastiness from accumulating in the very thing we want
+ to run as fast as possible.
+
+ On entry to this basic block:
+ t0 == the shifted low-order bits from the current source word
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word
+
+ We further know that t2 does not contain a null terminator. */
+
+ .align 3
+$u_loop:
+ or t0, t1, t0 # e0 : current dst word now complete
+ subq a2, 1, a2 # .. e1 : decrement word count
+ stq_u t0, 0(a0) # e0 : save the current word
+ addq a0, 8, a0 # .. e1 :
+ extql t2, a1, t1 # e0 : extract high bits for next time
+ beq a2, $u_eoc # .. e1 :
+ ldq_u t2, 8(a1) # e0 : load high word for next time
+ addq a1, 8, a1 # .. e1 :
+ nop # e0 :
+ cmpbge zero, t2, t8 # e1 : test new word for eos (stall)
+ extqh t2, a1, t0 # e0 : extract low bits for current word
+ beq t8, $u_loop # .. e1 :
+
+ /* We've found a zero somewhere in the source word we just read.
+ If it resides in the lower half, we have one (probably partial)
+ word to write out, and if it resides in the upper half, we
+ have one full and one partial word left to write out.
+
+ On entry to this basic block:
+ t0 == the shifted low-order bits from the current source word
+ t1 == the shifted high-order bits from the previous source word
+ t2 == the unshifted current source word. */
+$u_eos:
+ or t0, t1, t0 # e0 : first (partial) source word complete
+ nop # .. e1 :
+ cmpbge zero, t0, t8 # e0 : is the null in this first bit?
+ bne t8, $u_final # .. e1 (zdb)
+
+ stq_u t0, 0(a0) # e0 : the null was in the high-order bits
+ addq a0, 8, a0 # .. e1 :
+ subq a2, 1, a2 # e1 :
+
+$u_late_head_exit:
+ extql t2, a1, t0 # .. e0 :
+ cmpbge zero, t0, t8 # e0 :
+ or t8, t10, t6 # e1 :
+ cmoveq a2, t6, t8 # e0 :
+ nop # .. e1 :
+
+ /* Take care of a final (probably partial) result word.
+ On entry to this basic block:
+ t0 == assembled source word
+ t8 == cmpbge mask that found the null. */
+$u_final:
+ negq t8, t6 # e0 : isolate low bit set
+ and t6, t8, t12 # e1 :
+
+ and t12, 0x80, t6 # e0 : avoid dest word load if we can
+ bne t6, 1f # .. e1 (zdb)
+
+ ldq_u t1, 0(a0) # e0 :
+ subq t12, 1, t6 # .. e1 :
+ or t6, t12, t8 # e0 :
+ zapnot t0, t8, t0 # .. e1 : kill source bytes > null
+ zap t1, t8, t1 # e0 : kill dest bytes <= null
+ or t0, t1, t0 # e1 :
+
+1: stq_u t0, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+ /* Got to end-of-count before end of string.
+ On entry to this basic block:
+ t1 == the shifted high-order bits from the previous source word */
+$u_eoc:
+ and a1, 7, t6 # e1 :
+ sll t10, t6, t6 # e0 :
+ and t6, 0xff, t6 # e0 :
+ bne t6, 1f # .. e1 :
+
+ ldq_u t2, 8(a1) # e0 : load final src word
+ nop # .. e1 :
+ extqh t2, a1, t0 # e0 : extract low bits for last word
+ or t1, t0, t1 # e1 :
+
+1: cmpbge zero, t1, t8
+ mov t1, t0
+
+$u_eocfin: # end-of-count, final word
+ or t10, t8, t8
+ br $u_final
+
+ /* Unaligned copy entry point. */
+ .align 3
+$unaligned:
+
+ ldq_u t1, 0(a1) # e0 : load first source word
+
+ and a0, 7, t4 # .. e1 : find dest misalignment
+ and a1, 7, t5 # e0 : find src misalignment
+
+ /* Conditionally load the first destination word and a bytemask
+ with 0xff indicating that the destination byte is sacrosanct. */
+
+ mov zero, t0 # .. e1 :
+ mov zero, t6 # e0 :
+ beq t4, 1f # .. e1 :
+ ldq_u t0, 0(a0) # e0 :
+ lda t6, -1 # .. e1 :
+ mskql t6, a0, t6 # e0 :
+ subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
+
+ /* If source misalignment is larger than dest misalignment, we need
+ extra startup checks to avoid SEGV. */
+
+1: cmplt t4, t5, t12 # e1 :
+ extql t1, a1, t1 # .. e0 : shift src into place
+ lda t2, -1 # e0 : for creating masks later
+ beq t12, $u_head # .. e1 :
+
+ extql t2, a1, t2 # e0 :
+ cmpbge zero, t1, t8 # .. e1 : is there a zero?
+ andnot t2, t6, t12 # e0 : dest mask for a single word copy
+ or t8, t10, t5 # .. e1 : test for end-of-count too
+ cmpbge zero, t12, t3 # e0 :
+ cmoveq a2, t5, t8 # .. e1 :
+ andnot t8, t3, t8 # e0 :
+ beq t8, $u_head # .. e1 (zdb)
+
+ /* At this point we've found a zero in the first partial word of
+ the source. We need to isolate the valid source data and mask
+ it into the original destination data. (Incidentally, we know
+ that we'll need at least one byte of that original dest word.) */
+
+ ldq_u t0, 0(a0) # e0 :
+ negq t8, t6 # .. e1 : build bitmask of bytes <= zero
+ mskqh t1, t4, t1 # e0 :
+ and t6, t8, t2 # .. e1 :
+ subq t2, 1, t6 # e0 :
+ or t6, t2, t8 # e1 :
+
+ zapnot t12, t8, t12 # e0 : prepare source word; mirror changes
+ zapnot t1, t8, t1 # .. e1 : to source validity mask
+
+ andnot t0, t12, t0 # e0 : zero place for source to reside
+ or t0, t1, t0 # e1 : and put it there
+ stq_u t0, 0(a0) # e0 :
+ ret (t9) # .. e1 :
+
+ .end __stxncpy
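__stxncpy's entry code above first biases COUNT by the destination misalignment, then splits it into a whole-word loop counter (a2) and a one-hot mask marking the final count byte (t10). The same arithmetic in C (a sketch; the function name is illustrative):

    static void stxncpy_setup(unsigned long dst, unsigned long count,
                              unsigned long *words, unsigned long *eoc_mask)
    {
            unsigned long biased = count + (dst & 7) - 1;
            *words    = biased >> 3;          /* a2: full words to copy */
            *eoc_mask = 1ul << (biased & 7);  /* t10: last count byte   */
    }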
diff --git a/arch/alpha/lib/udelay.c b/arch/alpha/lib/udelay.c
new file mode 100644
index 000000000000..1c879bbce419
--- /dev/null
+++ b/arch/alpha/lib/udelay.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 1993, 2000 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h> /* for udelay's use of smp_processor_id */
+#include <asm/param.h>
+#include <asm/smp.h>
+#include <linux/delay.h>
+
+/*
+ * Use only for very small delays (< 1 msec).
+ *
+ * The active part of our cycle counter is only 32 bits wide, and
+ * we're treating the difference between two marks as signed. On
+ * a 1GHz box, that's about 2 seconds.
+ */
+
+void
+__delay(int loops)
+{
+ int tmp;
+ __asm__ __volatile__(
+ " rpcc %0\n"
+ " addl %1,%0,%1\n"
+ "1: rpcc %0\n"
+ " subl %1,%0,%0\n"
+ " bgt %0,1b"
+ : "=&r" (tmp), "=r" (loops) : "1"(loops));
+}
+
+#ifdef CONFIG_SMP
+#define LPJ cpu_data[smp_processor_id()].loops_per_jiffy
+#else
+#define LPJ loops_per_jiffy
+#endif
+
+void
+udelay(unsigned long usecs)
+{
+ usecs *= (((unsigned long)HZ << 32) / 1000000) * LPJ;
+ __delay((long)usecs >> 32);
+}
+EXPORT_SYMBOL(udelay);
+
+void
+ndelay(unsigned long nsecs)
+{
+ nsecs *= (((unsigned long)HZ << 32) / 1000000000) * LPJ;
+ __delay((long)nsecs >> 32);
+}
+EXPORT_SYMBOL(ndelay);
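Both delay conversions above evaluate loops = time * loops_per_jiffy * HZ / 10^6 (or 10^9 for ndelay) without a wide division: HZ / 10^6 is precomputed as a 32.32 fixed-point constant, the product is formed in 64 bits, and a 32-bit shift recovers the integer part. Unrolled in C (a sketch, assuming HZ from asm/param.h as in the file above; the function name is illustrative):

    unsigned long usecs_to_loops(unsigned long usecs, unsigned long lpj)
    {
            unsigned long scale = ((unsigned long)HZ << 32) / 1000000; /* 32.32 */
            return (long)(usecs * scale * lpj) >> 32;  /* drop the fraction */
    }

As the header comment notes, the products can wrap for large arguments, which is one reason the routine is documented for very small delays only.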
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile
new file mode 100644
index 000000000000..359ef087e69e
--- /dev/null
+++ b/arch/alpha/math-emu/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the FPU instruction emulation.
+#
+
+EXTRA_CFLAGS := -w
+
+obj-$(CONFIG_MATHEMU) += math-emu.o
+
+math-emu-objs := math.o qrnnd.o
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
new file mode 100644
index 000000000000..ae79dd970b02
--- /dev/null
+++ b/arch/alpha/math-emu/math.c
@@ -0,0 +1,400 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/uaccess.h>
+
+#include "sfp-util.h"
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+#include <math-emu/double.h>
+
+#define OPC_PAL 0x00
+#define OPC_INTA 0x10
+#define OPC_INTL 0x11
+#define OPC_INTS 0x12
+#define OPC_INTM 0x13
+#define OPC_FLTC 0x14
+#define OPC_FLTV 0x15
+#define OPC_FLTI 0x16
+#define OPC_FLTL 0x17
+#define OPC_MISC 0x18
+#define OPC_JSR 0x1a
+
+#define FOP_SRC_S 0
+#define FOP_SRC_T 2
+#define FOP_SRC_Q 3
+
+#define FOP_FNC_ADDx 0
+#define FOP_FNC_CVTQL 0
+#define FOP_FNC_SUBx 1
+#define FOP_FNC_MULx 2
+#define FOP_FNC_DIVx 3
+#define FOP_FNC_CMPxUN 4
+#define FOP_FNC_CMPxEQ 5
+#define FOP_FNC_CMPxLT 6
+#define FOP_FNC_CMPxLE 7
+#define FOP_FNC_SQRTx 11
+#define FOP_FNC_CVTxS 12
+#define FOP_FNC_CVTxT 14
+#define FOP_FNC_CVTxQ 15
+
+#define MISC_TRAPB 0x0000
+#define MISC_EXCB 0x0400
+
+extern unsigned long alpha_read_fp_reg (unsigned long reg);
+extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
+extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
+extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
+
+
+#ifdef MODULE
+
+MODULE_DESCRIPTION("FP Software completion module");
+
+extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
+extern long (*alpha_fp_emul) (unsigned long pc);
+
+static long (*save_emul_imprecise)(struct pt_regs *, unsigned long);
+static long (*save_emul) (unsigned long pc);
+
+long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long);
+long do_alpha_fp_emul(unsigned long);
+
+int init_module(void)
+{
+ save_emul_imprecise = alpha_fp_emul_imprecise;
+ save_emul = alpha_fp_emul;
+ alpha_fp_emul_imprecise = do_alpha_fp_emul_imprecise;
+ alpha_fp_emul = do_alpha_fp_emul;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ alpha_fp_emul_imprecise = save_emul_imprecise;
+ alpha_fp_emul = save_emul;
+}
+
+#undef alpha_fp_emul_imprecise
+#define alpha_fp_emul_imprecise do_alpha_fp_emul_imprecise
+#undef alpha_fp_emul
+#define alpha_fp_emul do_alpha_fp_emul
+
+#endif /* MODULE */
+
+
+/*
+ * Emulate the floating point instruction at address PC.  Returns -1 if the
+ * instruction to be emulated is illegal (such as with the opDEC trap), the
+ * SI_CODE for a SIGFPE signal if one should be raised, or 0 if everything's ok.
+ *
+ * Notice that the kernel does not and cannot use FP regs. This is good
+ * because it means that instead of saving/restoring all fp regs, we simply
+ * stick the result of the operation into the appropriate register.
+ */
+long
+alpha_fp_emul (unsigned long pc)
+{
+ FP_DECL_EX;
+ FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+ FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+
+ unsigned long fa, fb, fc, func, mode, src;
+ unsigned long res, va, vb, vc, swcr, fpcr;
+ __u32 insn;
+ long si_code;
+
+ get_user(insn, (__u32 __user *)pc);
+ fc = (insn >> 0) & 0x1f; /* destination register */
+ fb = (insn >> 16) & 0x1f;
+ fa = (insn >> 21) & 0x1f;
+ func = (insn >> 5) & 0xf;
+ src = (insn >> 9) & 0x3;
+ mode = (insn >> 11) & 0x3;
+
+ fpcr = rdfpcr();
+ swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr);
+
+ if (mode == 3) {
+ /* Dynamic -- get rounding mode from fpcr. */
+ mode = (fpcr >> FPCR_DYN_SHIFT) & 3;
+ }
+
+ switch (src) {
+ case FOP_SRC_S:
+ va = alpha_read_fp_reg_s(fa);
+ vb = alpha_read_fp_reg_s(fb);
+
+ FP_UNPACK_SP(SA, &va);
+ FP_UNPACK_SP(SB, &vb);
+
+ switch (func) {
+ case FOP_FNC_SUBx:
+ FP_SUB_S(SR, SA, SB);
+ goto pack_s;
+
+ case FOP_FNC_ADDx:
+ FP_ADD_S(SR, SA, SB);
+ goto pack_s;
+
+ case FOP_FNC_MULx:
+ FP_MUL_S(SR, SA, SB);
+ goto pack_s;
+
+ case FOP_FNC_DIVx:
+ FP_DIV_S(SR, SA, SB);
+ goto pack_s;
+
+ case FOP_FNC_SQRTx:
+ FP_SQRT_S(SR, SB);
+ goto pack_s;
+ }
+ goto bad_insn;
+
+ case FOP_SRC_T:
+ va = alpha_read_fp_reg(fa);
+ vb = alpha_read_fp_reg(fb);
+
+ if ((func & ~3) == FOP_FNC_CMPxUN) {
+ FP_UNPACK_RAW_DP(DA, &va);
+ FP_UNPACK_RAW_DP(DB, &vb);
+ if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) {
+ FP_SET_EXCEPTION(FP_EX_DENORM);
+ if (FP_DENORM_ZERO)
+ _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1);
+ }
+ if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) {
+ FP_SET_EXCEPTION(FP_EX_DENORM);
+ if (FP_DENORM_ZERO)
+ _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1);
+ }
+ FP_CMP_D(res, DA, DB, 3);
+ vc = 0x4000000000000000UL;
+ /* CMPTEQ, CMPTUN don't trap on QNaN,
+ while CMPTLT and CMPTLE do */
+ if (res == 3
+ && ((func & 3) >= 2
+ || FP_ISSIGNAN_D(DA)
+ || FP_ISSIGNAN_D(DB))) {
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ }
+ switch (func) {
+ case FOP_FNC_CMPxUN: if (res != 3) vc = 0; break;
+ case FOP_FNC_CMPxEQ: if (res) vc = 0; break;
+ case FOP_FNC_CMPxLT: if (res != -1) vc = 0; break;
+ case FOP_FNC_CMPxLE: if ((long)res > 0) vc = 0; break;
+ }
+ goto done_d;
+ }
+
+ FP_UNPACK_DP(DA, &va);
+ FP_UNPACK_DP(DB, &vb);
+
+ switch (func) {
+ case FOP_FNC_SUBx:
+ FP_SUB_D(DR, DA, DB);
+ goto pack_d;
+
+ case FOP_FNC_ADDx:
+ FP_ADD_D(DR, DA, DB);
+ goto pack_d;
+
+ case FOP_FNC_MULx:
+ FP_MUL_D(DR, DA, DB);
+ goto pack_d;
+
+ case FOP_FNC_DIVx:
+ FP_DIV_D(DR, DA, DB);
+ goto pack_d;
+
+ case FOP_FNC_SQRTx:
+ FP_SQRT_D(DR, DB);
+ goto pack_d;
+
+ case FOP_FNC_CVTxS:
+ /* It is irritating that DEC encoded CVTST with
+ SRC == T_floating. It is also interesting that
+ the bit used to tell the two apart is /U... */
+ if (insn & 0x2000) {
+ FP_CONV(S,D,1,1,SR,DB);
+ goto pack_s;
+ } else {
+ vb = alpha_read_fp_reg_s(fb);
+ FP_UNPACK_SP(SB, &vb);
+ DR_c = DB_c;
+ DR_s = DB_s;
+ DR_e = DB_e;
+ DR_f = SB_f << (52 - 23);
+ goto pack_d;
+ }
+
+ case FOP_FNC_CVTxQ:
+ if (DB_c == FP_CLS_NAN
+ && (_FP_FRAC_HIGH_RAW_D(DB) & _FP_QNANBIT_D)) {
+ /* AAHB Table B-2 says QNaN should not trigger INV */
+ vc = 0;
+ } else
+ FP_TO_INT_ROUND_D(vc, DB, 64, 2);
+ goto done_d;
+ }
+ goto bad_insn;
+
+ case FOP_SRC_Q:
+ vb = alpha_read_fp_reg(fb);
+
+ switch (func) {
+ case FOP_FNC_CVTQL:
+ /* Notice: We can get here only due to an integer
+ overflow. Such overflows are reported as invalid
+ ops. We return the result the hw would have
+ computed. */
+ vc = ((vb & 0xc0000000) << 32 | /* sign and msb */
+ (vb & 0x3fffffff) << 29); /* rest of the int */
+ FP_SET_EXCEPTION (FP_EX_INVALID);
+ goto done_d;
+
+ case FOP_FNC_CVTxS:
+ FP_FROM_INT_S(SR, ((long)vb), 64, long);
+ goto pack_s;
+
+ case FOP_FNC_CVTxT:
+ FP_FROM_INT_D(DR, ((long)vb), 64, long);
+ goto pack_d;
+ }
+ goto bad_insn;
+ }
+ goto bad_insn;
+
+pack_s:
+ FP_PACK_SP(&vc, SR);
+ if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ))
+ vc = 0;
+ alpha_write_fp_reg_s(fc, vc);
+ goto done;
+
+pack_d:
+ FP_PACK_DP(&vc, DR);
+ if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ))
+ vc = 0;
+done_d:
+ alpha_write_fp_reg(fc, vc);
+ goto done;
+
+ /*
+ * Take the appropriate action for each possible
+ * floating-point result:
+ *
+ * - Set the appropriate bits in the FPCR
+ * - If the specified exception is enabled in the FPCR,
+ * return. The caller (entArith) will dispatch
+ * the appropriate signal to the translated program.
+ *
+ * In addition, properly track the exception state in software
+ * as described in the Alpha Architecture Handbook section 4.7.7.3.
+ */
+done:
+ if (_fex) {
+ /* Record exceptions in software control word. */
+ swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT);
+ current_thread_info()->ieee_state
+ |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT);
+
+ /* Update hardware control register. */
+ fpcr &= (~FPCR_MASK | FPCR_DYN_MASK);
+ fpcr |= ieee_swcr_to_fpcr(swcr);
+ wrfpcr(fpcr);
+
+ /* Do we generate a signal? */
+ _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK;
+ si_code = 0;
+ if (_fex) {
+ if (_fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND;
+ if (_fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES;
+ if (_fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND;
+ if (_fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF;
+ if (_fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV;
+ if (_fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV;
+ }
+
+ return si_code;
+ }
+
+ /* We used to write the destination register here, but DEC FORTRAN
+ requires that the result *always* be written... so we do the write
+ immediately after the operations above. */
+
+ return 0;
+
+bad_insn:
+ printk(KERN_ERR "alpha_fp_emul: Invalid FP insn %#x at %#lx\n",
+ insn, pc);
+ return -1;
+}
+
+long
+alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
+{
+ unsigned long trigger_pc = regs->pc - 4;
+ unsigned long insn, opcode, rc, si_code = 0;
+
+ /*
+ * Turn off the bits corresponding to registers that are the
+ * target of instructions that set bits in the exception
+ * summary register. We have some slack doing this because a
+ * register that is the target of a trapping instruction can
+ * be written at most once in the trap shadow.
+ *
+ * Branches, jumps, TRAPBs, EXCBs and calls to PALcode all
+ * bound the trap shadow, so we need not look any further than
+ * up to the first occurrence of such an instruction.
+ */
+ while (write_mask) {
+ get_user(insn, (__u32 __user *)(trigger_pc));
+ opcode = insn >> 26;
+ rc = insn & 0x1f;
+
+ switch (opcode) {
+ case OPC_PAL:
+ case OPC_JSR:
+ case 0x30 ... 0x3f: /* branches */
+ goto egress;
+
+ case OPC_MISC:
+ switch (insn & 0xffff) {
+ case MISC_TRAPB:
+ case MISC_EXCB:
+ goto egress;
+
+ default:
+ break;
+ }
+ break;
+
+ case OPC_INTA:
+ case OPC_INTL:
+ case OPC_INTS:
+ case OPC_INTM:
+ write_mask &= ~(1UL << rc);
+ break;
+
+ case OPC_FLTC:
+ case OPC_FLTV:
+ case OPC_FLTI:
+ case OPC_FLTL:
+ write_mask &= ~(1UL << (rc + 32));
+ break;
+ }
+ if (!write_mask) {
+ /* Re-execute insns in the trap-shadow. */
+ regs->pc = trigger_pc + 4;
+ si_code = alpha_fp_emul(trigger_pc);
+ goto egress;
+ }
+ trigger_pc -= 4;
+ }
+
+egress:
+ return si_code;
+}
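alpha_fp_emul_imprecise() above keeps candidate destination registers in write_mask with integer registers in bits 0-31 and floating-point registers in bits 32-63, which is why the two opcode classes clear different bit positions. That bookkeeping as one helper (a sketch; the name is illustrative):

    static inline unsigned long
    clear_dest(unsigned long mask, unsigned long rc, int is_fp)
    {
            return mask & ~(1ul << (is_fp ? rc + 32 : rc));
    }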
diff --git a/arch/alpha/math-emu/qrnnd.S b/arch/alpha/math-emu/qrnnd.S
new file mode 100644
index 000000000000..d6373ec1bff9
--- /dev/null
+++ b/arch/alpha/math-emu/qrnnd.S
@@ -0,0 +1,163 @@
+ # Alpha 21064 __udiv_qrnnd
+ # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
+
+ # This file is part of GCC.
+
+ # The GNU MP Library is free software; you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation; either version 2 of the License, or (at your
+ # option) any later version.
+
+ # In addition to the permissions in the GNU General Public License, the
+ # Free Software Foundation gives you unlimited permission to link the
+ # compiled version of this file with other programs, and to distribute
+ # those programs without any restriction coming from the use of this
+ # file. (The General Public License restrictions do apply in other
+ # respects; for example, they cover modification of the file, and
+ # distribution when not linked into another program.)
+
+ # This file is distributed in the hope that it will be useful, but
+ # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+ # License for more details.
+
+ # You should have received a copy of the GNU General Public License
+ # along with GCC; see the file COPYING. If not, write to the
+ # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ # MA 02111-1307, USA.
+
+ .set noreorder
+ .set noat
+
+ .text
+
+ .globl __udiv_qrnnd
+ .ent __udiv_qrnnd
+__udiv_qrnnd:
+ .frame $30,0,$26,0
+ .prologue 0
+
+#define cnt $2
+#define tmp $3
+#define rem_ptr $16
+#define n1 $17
+#define n0 $18
+#define d $19
+#define qb $20
+#define AT $at
+
+ ldiq cnt,16
+ blt d,$largedivisor
+
+$loop1: cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule d,n1,qb
+ subq n1,d,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,$loop1
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+$largedivisor:
+ and n0,1,$4
+
+ srl n0,1,n0
+ sll n1,63,tmp
+ or tmp,n0,n0
+ srl n1,1,n1
+
+ and d,1,$6
+ srl d,1,$5
+ addq $5,$6,$5
+
+$loop2: cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ cmplt n0,0,tmp
+ addq n1,n1,n1
+ bis n1,tmp,n1
+ addq n0,n0,n0
+ cmpule $5,n1,qb
+ subq n1,$5,tmp
+ cmovne qb,tmp,n1
+ bis n0,qb,n0
+ subq cnt,1,cnt
+ bgt cnt,$loop2
+
+ addq n1,n1,n1
+ addq $4,n1,n1
+ bne $6,$Odd
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+$Odd:
+ /* q' in n0. r' in n1 */
+ addq n1,n0,n1
+
+ cmpult n1,n0,tmp # tmp := carry from addq
+ subq n1,d,AT
+ addq n0,tmp,n0
+ cmovne tmp,AT,n1
+
+ cmpult n1,d,tmp
+ addq n0,1,AT
+ cmoveq tmp,AT,n0
+ subq n1,d,AT
+ cmoveq tmp,AT,n1
+
+ stq n1,0(rem_ptr)
+ bis $31,n0,$0
+ ret $31,($26),1
+
+ .end __udiv_qrnnd
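Each unrolled group in $loop1 and $loop2 above is one step of restoring shift-subtract division over the 128-bit value n1:n0. A single step in C (a sketch; the function name is illustrative):

    static void udiv_step(unsigned long *n1, unsigned long *n0, unsigned long d)
    {
            unsigned long carry = *n0 >> 63;   /* cmplt n0,0,tmp          */
            *n1 = (*n1 << 1) | carry;          /* addq n1,n1 ; bis n1,tmp */
            *n0 <<= 1;                         /* addq n0,n0              */
            if (d <= *n1) {                    /* cmpule d,n1,qb          */
                    *n1 -= d;                  /* subq / cmovne           */
                    *n0 |= 1;                  /* bis n0,qb,n0            */
            }
    }

Roughly, the $largedivisor path halves the operands when the divisor's top bit is set so the shifted remainder cannot overflow 64 bits, then corrects the odd-divisor case in $Odd.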
diff --git a/arch/alpha/math-emu/sfp-util.h b/arch/alpha/math-emu/sfp-util.h
new file mode 100644
index 000000000000..f53707f77455
--- /dev/null
+++ b/arch/alpha/math-emu/sfp-util.h
@@ -0,0 +1,35 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/fpu.h>
+
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ ((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al)))
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ ((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl)))
+
+#define umul_ppmm(wh, wl, u, v) \
+ __asm__ ("mulq %2,%3,%1; umulh %2,%3,%0" \
+ : "=r" ((UDItype)(wh)), \
+ "=&r" ((UDItype)(wl)) \
+ : "r" ((UDItype)(u)), \
+ "r" ((UDItype)(v)))
+
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { unsigned long __r; \
+ (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
+ (r) = __r; \
+ } while (0)
+extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long,
+ unsigned long , unsigned long);
+
+#define UDIV_NEEDS_NORMALIZATION 1
+
+#define abort() goto bad_insn
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN -1
+#endif
+#define __BYTE_ORDER __LITTLE_ENDIAN
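add_ssaaaa() above detects the carry out of the low-word addition in plain C: unsigned addition wraps mod 2^64, so (sl) < (al) holds exactly when the true sum overflowed a word. A tiny self-check (a sketch, not part of the header):

    static int demo_add_ssaaaa(void)
    {
            unsigned long sh, sl;
            add_ssaaaa(sh, sl, 0ul, ~0ul, 0ul, 1ul);  /* 0:~0 + 0:1 */
            return sh == 1 && sl == 0;                /* carry propagated */
    }

sub_ddmmss() borrows the same way, using (al) < (bl) as the borrow flag.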
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
new file mode 100644
index 000000000000..6edd9a09ea4f
--- /dev/null
+++ b/arch/alpha/mm/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the linux alpha-specific parts of the memory manager.
+#
+
+EXTRA_CFLAGS := -Werror
+
+obj-y := init.o fault.o extable.o remap.o
+
+obj-$(CONFIG_DISCONTIGMEM) += numa.o
diff --git a/arch/alpha/mm/extable.c b/arch/alpha/mm/extable.c
new file mode 100644
index 000000000000..c3849baebd57
--- /dev/null
+++ b/arch/alpha/mm/extable.c
@@ -0,0 +1,34 @@
+/*
+ * linux/arch/alpha/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+{
+}
+
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ while (first <= last) {
+ const struct exception_table_entry *mid;
+ unsigned long mid_value;
+
+ mid = (last - first) / 2 + first;
+ mid_value = (unsigned long)&mid->insn + mid->insn;
+ if (mid_value == value)
+ return mid;
+ else if (mid_value < value)
+ first = mid+1;
+ else
+ last = mid-1;
+ }
+
+ return NULL;
+}
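search_extable() above compares against (unsigned long)&mid->insn + mid->insn: each exception-table entry stores the faulting PC as an offset relative to the entry's own address, which keeps entries small and position-independent. Recovering the absolute PC (a sketch; the helper name is illustrative):

    static inline unsigned long
    ex_pc(const struct exception_table_entry *e)
    {
            return (unsigned long)&e->insn + e->insn;  /* base + self-relative */
    }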
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
new file mode 100644
index 000000000000..64ace5a9cd3d
--- /dev/null
+++ b/arch/alpha/mm/fault.c
@@ -0,0 +1,247 @@
+/*
+ * linux/arch/alpha/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+
+#define __EXTERN_INLINE inline
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#undef __EXTERN_INLINE
+
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
+
+
+/*
+ * Force a new ASN for a task.
+ */
+
+#ifndef CONFIG_SMP
+unsigned long last_asn = ASN_FIRST_VERSION;
+#endif
+
+void
+__load_new_mm_context(struct mm_struct *next_mm)
+{
+ unsigned long mmc;
+ struct pcb_struct *pcb;
+
+ mmc = __get_new_mm_context(next_mm, smp_processor_id());
+ next_mm->context[smp_processor_id()] = mmc;
+
+ pcb = &current_thread_info()->pcb;
+ pcb->asn = mmc & HARDWARE_ASN_MASK;
+ pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+
+ __reload_thread(pcb);
+}
+
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to handle_mm_fault().
+ *
+ * mmcsr:
+ * 0 = translation not valid
+ * 1 = access violation
+ * 2 = fault-on-read
+ * 3 = fault-on-execute
+ * 4 = fault-on-write
+ *
+ * cause:
+ * -1 = instruction fetch
+ * 0 = load
+ * 1 = store
+ *
+ * Registers $9 through $15 are saved in a block just prior to `regs' and
+ * are saved and restored around the call to allow exception code to
+ * modify them.
+ */
+
+/* Macro for exception fixup code to access integer registers. */
+#define dpf_reg(r) \
+ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
+ (r) <= 18 ? (r)+8 : (r)-10])
+
+asmlinkage void
+do_page_fault(unsigned long address, unsigned long mmcsr,
+ long cause, struct pt_regs *regs)
+{
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
+ const struct exception_table_entry *fixup;
+ int fault, si_code = SEGV_MAPERR;
+ siginfo_t info;
+
+ /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
+ (or is suppressed by the PALcode). Support that for older CPUs
+ by ignoring such an instruction. */
+ if (cause == 0) {
+ unsigned int insn;
+ __get_user(insn, (unsigned int __user *)regs->pc);
+ if ((insn >> 21 & 0x1f) == 0x1f &&
+ /* ldq ldl ldt lds ldg ldf ldwu ldbu */
+ (1ul << (insn >> 26) & 0x30f00001400ul)) {
+ regs->pc += 4;
+ return;
+ }
+ }
+
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+ if (!mm || in_interrupt())
+ goto no_context;
+
+#ifdef CONFIG_ALPHA_LARGE_VMALLOC
+ if (address >= TASK_SIZE)
+ goto vmalloc_fault;
+#endif
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /* Ok, we have a good vm_area for this memory access, so
+ we can handle it. */
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ }
+
+ survive:
+ /* If for any reason at all we couldn't handle the fault,
+ make sure we exit gracefully rather than endlessly redo
+ the fault. */
+ fault = handle_mm_fault(mm, vma, address, cause > 0);
+ up_read(&mm->mmap_sem);
+
+ switch (fault) {
+ case VM_FAULT_MINOR:
+ current->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ goto do_sigbus;
+ case VM_FAULT_OOM:
+ goto out_of_memory;
+ default:
+ BUG();
+ }
+ return;
+
+ /* Something tried to access memory that isn't in our memory map.
+ Fix it, but check if it's kernel or user first. */
+ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs))
+ goto do_sigsegv;
+
+ no_context:
+ /* Are we prepared to handle this fault as an exception? */
+ if ((fixup = search_exception_tables(regs->pc)) != 0) {
+ unsigned long newpc;
+ newpc = fixup_exception(dpf_reg, fixup, regs->pc);
+ regs->pc = newpc;
+ return;
+ }
+
+ /* Oops. The kernel tried to access some bad page. We'll have to
+ terminate things with extreme prejudice. */
+ printk(KERN_ALERT "Unable to handle kernel paging request at "
+ "virtual address %016lx\n", address);
+ die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
+ do_exit(SIGKILL);
+
+ /* We ran out of memory, or some other thing happened to us that
+ made us unable to handle the page fault gracefully. */
+ out_of_memory:
+ if (current->pid == 1) {
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ printk(KERN_ALERT "VM: killing process %s(%d)\n",
+ current->comm, current->pid);
+ if (!user_mode(regs))
+ goto no_context;
+ do_exit(SIGKILL);
+
+ do_sigbus:
+ /* Send a sigbus, regardless of whether we were in kernel
+ or user mode. */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *) address;
+ force_sig_info(SIGBUS, &info, current);
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+
+ do_sigsegv:
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *) address;
+ force_sig_info(SIGSEGV, &info, current);
+ return;
+
+#ifdef CONFIG_ALPHA_LARGE_VMALLOC
+ vmalloc_fault:
+ if (user_mode(regs))
+ goto do_sigsegv;
+ else {
+ /* Synchronize this task's top level page-table
+ with the "reference" page table from init. */
+ long index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+
+ pgd = current->active_mm->pgd + index;
+ pgd_k = swapper_pg_dir + index;
+ if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
+ pgd_val(*pgd) = pgd_val(*pgd_k);
+ return;
+ }
+ goto no_context;
+ }
+#endif
+}
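The constant 0x30f00001400ul tested in do_page_fault() above is a bitset over the 6-bit opcode field, with one bit per load opcode that acts as a prefetch when its target register is $31/$f31 (the preceding check on bits 21-25). Built out explicitly (a sketch; OP is an illustrative macro):

    #define OP(x) (1ul << (x))
    static const unsigned long prefetch_ops =
              OP(0x0a) /* ldbu */ | OP(0x0c) /* ldwu */
            | OP(0x20) /* ldf  */ | OP(0x21) /* ldg  */
            | OP(0x22) /* lds  */ | OP(0x23) /* ldt  */
            | OP(0x28) /* ldl  */ | OP(0x29) /* ldq  */;
    /* prefetch_ops == 0x30f00001400ul, matching the source comment */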
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
new file mode 100644
index 000000000000..90752f6d8867
--- /dev/null
+++ b/arch/alpha/mm/init.c
@@ -0,0 +1,382 @@
+/*
+ * linux/arch/alpha/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h> /* max_low_pfn */
+#include <linux/vmalloc.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/hwrpb.h>
+#include <asm/dma.h>
+#include <asm/mmu_context.h>
+#include <asm/console.h>
+#include <asm/tlb.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+extern void die_if_kernel(char *,struct pt_regs *,long);
+
+static struct pcb_struct original_pcb;
+
+pgd_t *
+pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret, *init;
+
+ ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ init = pgd_offset(&init_mm, 0UL);
+ if (ret) {
+#ifdef CONFIG_ALPHA_LARGE_VMALLOC
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
+#else
+ pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
+#endif
+
+ /* The last PGD entry is the VPTB self-map. */
+ pgd_val(ret[PTRS_PER_PGD-1])
+ = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
+ }
+ return ret;
+}
+
+pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ return pte;
+}
+
+
+/*
+ * BAD_PAGE is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk of
+ * a process dying in kernel mode, possibly leaving an inode
+ * unused, etc.
+ *
+ * BAD_PAGETABLE is the accompanying page-table: it is initialized
+ * to point to BAD_PAGE entries.
+ *
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+pmd_t *
+__bad_pagetable(void)
+{
+ memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
+ return (pmd_t *) EMPTY_PGT;
+}
+
+pte_t
+__bad_page(void)
+{
+ memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
+ return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void
+show_mem(void)
+{
+ long i,free = 0,total = 0,reserved = 0;
+ long shared = 0, cached = 0;
+
+ printk("\nMem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (!page_count(mem_map+i))
+ free++;
+ else
+ shared += page_count(mem_map + i) - 1;
+ }
+ printk("%ld pages of RAM\n",total);
+ printk("%ld free pages\n",free);
+ printk("%ld reserved pages\n",reserved);
+ printk("%ld pages shared\n",shared);
+ printk("%ld pages swap cached\n",cached);
+}
+#endif
+
+static inline unsigned long
+load_PCB(struct pcb_struct *pcb)
+{
+ register unsigned long sp __asm__("$30");
+ pcb->ksp = sp;
+ return __reload_thread(pcb);
+}
+
+/* Set up initial PCB, VPTB, and other such niceties.  */
+
+static inline void
+switch_to_system_map(void)
+{
+ unsigned long newptbr;
+ unsigned long original_pcb_ptr;
+
+ /* Initialize the kernel's page tables. Linux puts the vptb in
+ the last slot of the L1 page table. */
+ memset(swapper_pg_dir, 0, PAGE_SIZE);
+ newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
+ pgd_val(swapper_pg_dir[1023]) =
+ (newptbr << 32) | pgprot_val(PAGE_KERNEL);
+
+ /* Set the vptb. This is often done by the bootloader, but
+ shouldn't be required. */
+ if (hwrpb->vptb != 0xfffffffe00000000UL) {
+ wrvptptr(0xfffffffe00000000UL);
+ hwrpb->vptb = 0xfffffffe00000000UL;
+ hwrpb_update_checksum(hwrpb);
+ }
+
+ /* Also set up the real kernel PCB while we're at it. */
+ init_thread_info.pcb.ptbr = newptbr;
+ init_thread_info.pcb.flags = 1; /* set FEN, clear everything else */
+ original_pcb_ptr = load_PCB(&init_thread_info.pcb);
+ tbia();
+
+ /* Save off the contents of the original PCB so that we can
+ restore the original console's page tables for a clean reboot.
+
+ Note that the PCB is supposed to be a physical address, but
+ since KSEG values also happen to work, folks get confused.
+ Check this here. */
+
+ if (original_pcb_ptr < PAGE_OFFSET) {
+ original_pcb_ptr = (unsigned long)
+ phys_to_virt(original_pcb_ptr);
+ }
+ original_pcb = *(struct pcb_struct *) original_pcb_ptr;
+}
+
+int callback_init_done;
+
+void * __init
+callback_init(void * kernel_end)
+{
+ struct crb_struct * crb;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ void *two_pages;
+
+ /* Starting at the HWRPB, locate the CRB. */
+ crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);
+
+ if (alpha_using_srm) {
+ /* Tell the console whither it is to be remapped. */
+ if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
+ __halt(); /* "We're boned." --Bender */
+
+ /* Edit the procedure descriptors for DISPATCH and FIXUP. */
+ crb->dispatch_va = (struct procdesc_struct *)
+ (VMALLOC_START + (unsigned long)crb->dispatch_va
+ - crb->map[0].va);
+ crb->fixup_va = (struct procdesc_struct *)
+ (VMALLOC_START + (unsigned long)crb->fixup_va
+ - crb->map[0].va);
+ }
+
+ switch_to_system_map();
+
+ /* Allocate one PGD and one PMD. In the case of SRM, we'll need
+ these to actually remap the console. There is an assumption
+ here that only one of each is needed, and this allows for 8MB.
+ On systems with larger consoles, additional pages will be
+ allocated as needed during the mapping process.
+
+	   When not using SRM, and without CONFIG_ALPHA_LARGE_VMALLOC,
+	   we still need to allocate the PGD we use for vmalloc before
+	   we start forking other tasks. */
+
+ two_pages = (void *)
+ (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
+ kernel_end = two_pages + 2*PAGE_SIZE;
+ memset(two_pages, 0, 2*PAGE_SIZE);
+
+ pgd = pgd_offset_k(VMALLOC_START);
+ pgd_set(pgd, (pmd_t *)two_pages);
+ pmd = pmd_offset(pgd, VMALLOC_START);
+ pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
+
+ if (alpha_using_srm) {
+ static struct vm_struct console_remap_vm;
+ unsigned long vaddr = VMALLOC_START;
+ unsigned long i, j;
+
+ /* Set up the third level PTEs and update the virtual
+ addresses of the CRB entries. */
+ for (i = 0; i < crb->map_entries; ++i) {
+ unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
+ crb->map[i].va = vaddr;
+ for (j = 0; j < crb->map[i].count; ++j) {
+				/* Newer consoles (especially on larger
+				   systems) may require more pages of
+				   PTEs.  Grab additional pages as needed. */
+ if (pmd != pmd_offset(pgd, vaddr)) {
+ memset(kernel_end, 0, PAGE_SIZE);
+ pmd = pmd_offset(pgd, vaddr);
+ pmd_set(pmd, (pte_t *)kernel_end);
+ kernel_end += PAGE_SIZE;
+ }
+ set_pte(pte_offset_kernel(pmd, vaddr),
+ pfn_pte(pfn, PAGE_KERNEL));
+ pfn++;
+ vaddr += PAGE_SIZE;
+ }
+ }
+
+ /* Let vmalloc know that we've allocated some space. */
+ console_remap_vm.flags = VM_ALLOC;
+ console_remap_vm.addr = (void *) VMALLOC_START;
+ console_remap_vm.size = vaddr - VMALLOC_START;
+ vmlist = &console_remap_vm;
+ }
+
+ callback_init_done = 1;
+ return kernel_end;
+}
+
+
+#ifndef CONFIG_DISCONTIGMEM
+/*
+ * paging_init() sets up the memory map.
+ */
+void
+paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned long dma_pfn, high_pfn;
+
+ dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ high_pfn = max_pfn = max_low_pfn;
+
+ if (dma_pfn >= high_pfn)
+ zones_size[ZONE_DMA] = high_pfn;
+ else {
+ zones_size[ZONE_DMA] = dma_pfn;
+ zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+ }
+
+ /* Initialize mem_map[]. */
+ free_area_init(zones_size);
+
+ /* Initialize the kernel's ZERO_PGE. */
+ memset((void *)ZERO_PGE, 0, PAGE_SIZE);
+}
+#endif /* CONFIG_DISCONTIGMEM */
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
+void
+srm_paging_stop (void)
+{
+ /* Move the vptb back to where the SRM console expects it. */
+ swapper_pg_dir[1] = swapper_pg_dir[1023];
+ tbia();
+ wrvptptr(0x200000000UL);
+ hwrpb->vptb = 0x200000000UL;
+ hwrpb_update_checksum(hwrpb);
+
+ /* Reload the page tables that the console had in use. */
+ load_PCB(&original_pcb);
+ tbia();
+}
+#endif
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init
+printk_memory_info(void)
+{
+ unsigned long codesize, reservedpages, datasize, initsize, tmp;
+ extern int page_is_ram(unsigned long) __init;
+ extern char _text, _etext, _data, _edata;
+ extern char __init_begin, __init_end;
+
+ /* printk all the information */
+ reservedpages = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
+ reservedpages++;
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_data;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ max_mapnr << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10);
+}
+
+void __init
+mem_init(void)
+{
+ max_mapnr = num_physpages = max_low_pfn;
+ totalram_pages += free_all_bootmem();
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+ printk_memory_info();
+}
+#endif /* CONFIG_DISCONTIGMEM */
+
+void
+free_reserved_mem(void *start, void *end)
+{
+ void *__start = start;
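+ /* These pages were marked reserved at boot; clear that, give each
+ a refcount of 1, and free_page() hands it to the page allocator. */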
+ for (; __start < end; __start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(__start));
+ set_page_count(virt_to_page(__start), 1);
+ free_page((long)__start);
+ totalram_pages++;
+ }
+}
+
+void
+free_initmem(void)
+{
+ extern char __init_begin, __init_end;
+
+ free_reserved_mem(&__init_begin, &__init_end);
+ printk ("Freeing unused kernel memory: %ldk freed\n",
+ (&__init_end - &__init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void
+free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_reserved_mem((void *)start, (void *)end);
+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+}
+#endif
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
new file mode 100644
index 000000000000..ba81c4422aaf
--- /dev/null
+++ b/arch/alpha/mm/numa.c
@@ -0,0 +1,395 @@
+/*
+ * linux/arch/alpha/mm/numa.c
+ *
+ * DISCONTIGMEM NUMA alpha support.
+ *
+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
+#include <linux/initrd.h>
+
+#include <asm/hwrpb.h>
+#include <asm/pgalloc.h>
+
+pg_data_t node_data[MAX_NUMNODES];
+bootmem_data_t node_bdata[MAX_NUMNODES];
+
+#undef DEBUG_DISCONTIG
+#ifdef DEBUG_DISCONTIG
+#define DBGDCONT(args...) printk(args)
+#else
+#define DBGDCONT(args...)
+#endif
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+#define for_each_mem_cluster(memdesc, cluster, i) \
+ for ((cluster) = (memdesc)->cluster, (i) = 0; \
+ (i) < (memdesc)->numclusters; (i)++, (cluster)++)
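+
+/* PFN_UP and PFN_DOWN convert byte addresses to page frame numbers,
+   rounding up and down respectively: with 8KB pages, PFN_UP(0x2001)
+   is 2 while PFN_DOWN(0x2001) is 1.  for_each_mem_cluster walks the
+   HWRPB memory descriptor table one cluster at a time.  */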
+
+static void __init show_mem_layout(void)
+{
+ struct memclust_struct * cluster;
+ struct memdesc_struct * memdesc;
+ int i;
+
+ /* Find free clusters, and init and free the bootmem accordingly. */
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
+
+ printk("Raw memory layout:\n");
+ for_each_mem_cluster(memdesc, cluster, i) {
+ printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
+ i, cluster->usage, cluster->start_pfn,
+ cluster->start_pfn + cluster->numpages);
+ }
+}
+
+static void __init
+setup_memory_node(int nid, void *kernel_end)
+{
+ extern unsigned long mem_size_limit;
+ struct memclust_struct * cluster;
+ struct memdesc_struct * memdesc;
+ unsigned long start_kernel_pfn, end_kernel_pfn;
+ unsigned long bootmap_size, bootmap_pages, bootmap_start;
+ unsigned long start, end;
+ unsigned long node_pfn_start, node_pfn_end;
+ unsigned long node_min_pfn, node_max_pfn;
+ int i;
+ unsigned long node_datasz = PFN_UP(sizeof(pg_data_t));
+ int show_init = 0;
+
+ /* Find the bounds of current node */
+ node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
+ node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);
+
+ /* Find free clusters, and init and free the bootmem accordingly. */
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
+
+ /* find the bounds of this node (node_min_pfn/node_max_pfn) */
+ node_min_pfn = ~0UL;
+ node_max_pfn = 0UL;
+ for_each_mem_cluster(memdesc, cluster, i) {
+ /* Bit 0 is console/PALcode reserved. Bit 1 is
+ non-volatile memory -- we might want to mark
+ this for later. */
+ if (cluster->usage & 3)
+ continue;
+
+ start = cluster->start_pfn;
+ end = start + cluster->numpages;
+
+ if (start >= node_pfn_end || end <= node_pfn_start)
+ continue;
+
+ if (!show_init) {
+ show_init = 1;
+ printk("Initializing bootmem allocator on Node ID %d\n", nid);
+ }
+ printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
+ i, cluster->usage, cluster->start_pfn,
+ cluster->start_pfn + cluster->numpages);
+
+ if (start < node_pfn_start)
+ start = node_pfn_start;
+ if (end > node_pfn_end)
+ end = node_pfn_end;
+
+ if (start < node_min_pfn)
+ node_min_pfn = start;
+ if (end > node_max_pfn)
+ node_max_pfn = end;
+ }
+
+ if (mem_size_limit && node_max_pfn > mem_size_limit) {
+ static int msg_shown = 0;
+ if (!msg_shown) {
+ msg_shown = 1;
+ printk("setup: forcing memory size to %ldK (from %ldK).\n",
+ mem_size_limit << (PAGE_SHIFT - 10),
+ node_max_pfn << (PAGE_SHIFT - 10));
+ }
+ node_max_pfn = mem_size_limit;
+ }
+
+ if (node_min_pfn >= node_max_pfn)
+ return;
+
+ /* Update global {min,max}_low_pfn from node information. */
+ if (node_min_pfn < min_low_pfn)
+ min_low_pfn = node_min_pfn;
+ if (node_max_pfn > max_low_pfn)
+ max_pfn = max_low_pfn = node_max_pfn;
+
+ num_physpages += node_max_pfn - node_min_pfn;
+
+#if 0 /* we'll try this one again in a little while */
+ /* Cute trick to make sure our local node data is on local memory */
+ node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
+#endif
+ /* Quasi-mark the pg_data_t as in-use */
+ node_min_pfn += node_datasz;
+ if (node_min_pfn >= node_max_pfn) {
+ printk(" not enough mem to reserve NODE_DATA");
+ return;
+ }
+ NODE_DATA(nid)->bdata = &node_bdata[nid];
+
+ printk(" Detected node memory: start %8lu, end %8lu\n",
+ node_min_pfn, node_max_pfn);
+
+ DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
+ DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata);
+
+ /* Find the bounds of kernel memory. */
+ start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
+ end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
+ bootmap_start = -1;
+
+ if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
+ panic("kernel loaded out of ram");
+
+ /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
+ Note that we round this down, not up - node memory
+ has much larger alignment than 8MB, so it's safe. */
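+ /* (Assuming the default MAX_ORDER of 11, this clears the low ten
+ pfn bits: 1024 pages of 8KB each, i.e. the 8MB noted above.) */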
+ node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
+
+ /* We need to know how many physically contiguous pages
+ we'll need for the bootmap. */
+ bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn);
+
+ /* Now find a good region where to allocate the bootmap. */
+ for_each_mem_cluster(memdesc, cluster, i) {
+ if (cluster->usage & 3)
+ continue;
+
+ start = cluster->start_pfn;
+ end = start + cluster->numpages;
+
+ if (start >= node_max_pfn || end <= node_min_pfn)
+ continue;
+
+ if (end > node_max_pfn)
+ end = node_max_pfn;
+ if (start < node_min_pfn)
+ start = node_min_pfn;
+
+ if (start < start_kernel_pfn) {
+ if (end > end_kernel_pfn
+ && end - end_kernel_pfn >= bootmap_pages) {
+ bootmap_start = end_kernel_pfn;
+ break;
+ } else if (end > start_kernel_pfn)
+ end = start_kernel_pfn;
+ } else if (start < end_kernel_pfn)
+ start = end_kernel_pfn;
+ if (end - start >= bootmap_pages) {
+ bootmap_start = start;
+ break;
+ }
+ }
+
+ if (bootmap_start == -1)
+ panic("couldn't find a contigous place for the bootmap");
+
+ /* Allocate the bootmap and mark the whole MM as reserved. */
+ bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start,
+ node_min_pfn, node_max_pfn);
+ DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n",
+ bootmap_start, bootmap_size, bootmap_pages);
+
+ /* Mark the free regions. */
+ for_each_mem_cluster(memdesc, cluster, i) {
+ if (cluster->usage & 3)
+ continue;
+
+ start = cluster->start_pfn;
+ end = cluster->start_pfn + cluster->numpages;
+
+ if (start >= node_max_pfn || end <= node_min_pfn)
+ continue;
+
+ if (end > node_max_pfn)
+ end = node_max_pfn;
+ if (start < node_min_pfn)
+ start = node_min_pfn;
+
+ if (start < start_kernel_pfn) {
+ if (end > end_kernel_pfn) {
+ free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start),
+ (PFN_PHYS(start_kernel_pfn)
+ - PFN_PHYS(start)));
+ printk(" freeing pages %ld:%ld\n",
+ start, start_kernel_pfn);
+ start = end_kernel_pfn;
+ } else if (end > start_kernel_pfn)
+ end = start_kernel_pfn;
+ } else if (start < end_kernel_pfn)
+ start = end_kernel_pfn;
+ if (start >= end)
+ continue;
+
+ free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
+ printk(" freeing pages %ld:%ld\n", start, end);
+ }
+
+ /* Reserve the bootmap memory. */
+ reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), bootmap_size);
+ printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+
+ node_set_online(nid);
+}
+
+void __init
+setup_memory(void *kernel_end)
+{
+ int nid;
+
+ show_mem_layout();
+
+ nodes_clear(node_online_map);
+
+ min_low_pfn = ~0UL;
+ max_low_pfn = 0UL;
+ for (nid = 0; nid < MAX_NUMNODES; nid++)
+ setup_memory_node(nid, kernel_end);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_start = INITRD_START;
+ if (initrd_start) {
+ extern void *move_initrd(unsigned long);
+
+ initrd_end = initrd_start+INITRD_SIZE;
+ printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *) initrd_start, INITRD_SIZE);
+
+ if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
+ if (!move_initrd(PFN_PHYS(max_low_pfn)))
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%p)\ndisabling initrd\n",
+ initrd_end,
+ phys_to_virt(PFN_PHYS(max_low_pfn)));
+ } else {
+ nid = kvaddr_to_nid(initrd_start);
+ reserve_bootmem_node(NODE_DATA(nid),
+ virt_to_phys((void *)initrd_start),
+ INITRD_SIZE);
+ }
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+}
+
+void __init paging_init(void)
+{
+ unsigned int nid;
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+ unsigned long dma_local_pfn;
+
+ /*
+ * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
+ * in the NUMA model; for now we convert it to a pfn and
+ * interpret that pfn as local per-node information.
+ * This isn't very important since none of these machines
+ * have legacy ISA slots anyway.
+ */
+ dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+
+ for_each_online_node(nid) {
+ unsigned long start_pfn = node_bdata[nid].node_boot_start >> PAGE_SHIFT;
+ unsigned long end_pfn = node_bdata[nid].node_low_pfn;
+
+ if (dma_local_pfn >= end_pfn - start_pfn)
+ zones_size[ZONE_DMA] = end_pfn - start_pfn;
+ else {
+ zones_size[ZONE_DMA] = dma_local_pfn;
+ zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
+ }
+ free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, NULL);
+ }
+
+ /* Initialize the kernel's ZERO_PGE. */
+ memset((void *)ZERO_PGE, 0, PAGE_SIZE);
+}
+
+void __init mem_init(void)
+{
+ unsigned long codesize, reservedpages, datasize, initsize, pfn;
+ extern int page_is_ram(unsigned long) __init;
+ extern char _text, _etext, _data, _edata;
+ extern char __init_begin, __init_end;
+ unsigned long nid, i;
+ struct page * lmem_map;
+
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ reservedpages = 0;
+ for_each_online_node(nid) {
+ /*
+ * This will free up the bootmem, i.e., slot 0 memory
+ */
+ totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
+
+ lmem_map = node_mem_map(nid);
+ pfn = NODE_DATA(nid)->node_start_pfn;
+ for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
+ if (page_is_ram(pfn) && PageReserved(lmem_map+i))
+ reservedpages++;
+ }
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_data;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, "
+ "%luk data, %luk init)\n",
+ (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10);
+#if 0
+ mem_stress();
+#endif
+}
+
+void
+show_mem(void)
+{
+ long i, free = 0, total = 0, reserved = 0;
+ long shared = 0, cached = 0;
+ int nid;
+
+ printk("\nMem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ for_each_online_node(nid) {
+ struct page * lmem_map = node_mem_map(nid);
+ i = node_spanned_pages(nid);
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(lmem_map+i))
+ reserved++;
+ else if (PageSwapCache(lmem_map+i))
+ cached++;
+ else if (!page_count(lmem_map+i))
+ free++;
+ else
+ shared += page_count(lmem_map + i) - 1;
+ }
+ }
+ printk("%ld pages of RAM\n",total);
+ printk("%ld free pages\n",free);
+ printk("%ld reserved pages\n",reserved);
+ printk("%ld pages shared\n",shared);
+ printk("%ld pages swap cached\n",cached);
+}
diff --git a/arch/alpha/mm/remap.c b/arch/alpha/mm/remap.c
new file mode 100644
index 000000000000..19817ad3d89b
--- /dev/null
+++ b/arch/alpha/mm/remap.c
@@ -0,0 +1,90 @@
+#include <linux/vmalloc.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+
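+/* Build kernel page tables mapping a physical range at a kernel
+   virtual address, walking pgd -> pmd -> pte.  Used by the ioremap()
+   implementations (e.g. the Titan/Marvel cores) that need real page
+   tables rather than a direct-mapped window.  */
+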
+/* called with the page_table_lock held */
+static inline void
+remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
+{
+ unsigned long end;
+ unsigned long pfn;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+ pfn = phys_addr >> PAGE_SHIFT;
+ do {
+ if (!pte_none(*pte)) {
+ printk("remap_area_pte: page already exists\n");
+ BUG();
+ }
+ set_pte(pte, pfn_pte(pfn,
+ __pgprot(_PAGE_VALID | _PAGE_ASM |
+ _PAGE_KRE | _PAGE_KWE | flags)));
+ address += PAGE_SIZE;
+ pfn++;
+ pte++;
+ } while (address && (address < end));
+}
+
+/* called with the page_table_lock held */
+static inline int
+remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ phys_addr -= address;
+ if (address >= end)
+ BUG();
+ do {
+ pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ remap_area_pte(pte, address, end - address,
+ address + phys_addr, flags);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int
+__alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
+ unsigned long size, unsigned long flags)
+{
+ pgd_t * dir;
+ int error = 0;
+ unsigned long end = address + size;
+
+ phys_addr -= address;
+ dir = pgd_offset(&init_mm, address);
+ flush_cache_all();
+ if (address >= end)
+ BUG();
+ spin_lock(&init_mm.page_table_lock);
+ do {
+ pmd_t *pmd;
+ pmd = pmd_alloc(&init_mm, dir, address);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ if (remap_area_pmd(pmd, address, end - address,
+ phys_addr + address, flags))
+ break;
+ error = 0;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ } while (address && (address < end));
+ spin_unlock(&init_mm.page_table_lock);
+ return error;
+}
+
diff --git a/arch/alpha/oprofile/Kconfig b/arch/alpha/oprofile/Kconfig
new file mode 100644
index 000000000000..5ade19801b97
--- /dev/null
+++ b/arch/alpha/oprofile/Kconfig
@@ -0,0 +1,23 @@
+
+menu "Profiling support"
+ depends on EXPERIMENTAL
+
+config PROFILING
+ bool "Profiling support (EXPERIMENTAL)"
+ help
+ Say Y here to enable the extended profiling support mechanisms used
+ by profilers such as OProfile.
+
+
+config OPROFILE
+ tristate "OProfile system profiling (EXPERIMENTAL)"
+ depends on PROFILING
+ help
+ OProfile is a profiling system capable of profiling the
+ whole system, include the kernel, kernel modules, libraries,
+ and applications.
+
+ If unsure, say N.
+
+endmenu
+
diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile
new file mode 100644
index 000000000000..4aa56247bdc6
--- /dev/null
+++ b/arch/alpha/oprofile/Makefile
@@ -0,0 +1,19 @@
+EXTRA_CFLAGS := -Werror -Wno-sign-compare
+
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) common.o
+oprofile-$(CONFIG_ALPHA_GENERIC) += op_model_ev4.o \
+ op_model_ev5.o \
+ op_model_ev6.o \
+ op_model_ev67.o
+oprofile-$(CONFIG_ALPHA_EV4) += op_model_ev4.o
+oprofile-$(CONFIG_ALPHA_EV5) += op_model_ev5.o
+oprofile-$(CONFIG_ALPHA_EV6) += op_model_ev6.o \
+ op_model_ev67.o
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
new file mode 100644
index 000000000000..908eb4af8dec
--- /dev/null
+++ b/arch/alpha/oprofile/common.c
@@ -0,0 +1,189 @@
+/**
+ * @file arch/alpha/oprofile/common.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+#include "op_impl.h"
+
+extern struct op_axp_model op_model_ev4 __attribute__((weak));
+extern struct op_axp_model op_model_ev5 __attribute__((weak));
+extern struct op_axp_model op_model_pca56 __attribute__((weak));
+extern struct op_axp_model op_model_ev6 __attribute__((weak));
+extern struct op_axp_model op_model_ev67 __attribute__((weak));
+
+static struct op_axp_model *model;
+
+extern void (*perf_irq)(unsigned long, struct pt_regs *);
+static void (*save_perf_irq)(unsigned long, struct pt_regs *);
+
+static struct op_counter_config ctr[20];
+static struct op_system_config sys;
+static struct op_register_config reg;
+
+/* Called from do_entInt to handle the performance monitor interrupt. */
+
+static void
+op_handle_interrupt(unsigned long which, struct pt_regs *regs)
+{
+ model->handle_interrupt(which, regs, ctr);
+
+ /* If the user has selected an interrupt frequency that is
+ not exactly the width of the counter, write a new value
+ into the counter such that it'll overflow after N more
+ events. */
+ if ((reg.need_reset >> which) & 1)
+ model->reset_ctr(&reg, which);
+}
+
+static int
+op_axp_setup(void)
+{
+ unsigned long i, e;
+
+ /* Install our interrupt handler into the existing hook. */
+ save_perf_irq = perf_irq;
+ perf_irq = op_handle_interrupt;
+
+ /* Compute the mask of enabled counters. */
+ for (i = e = 0; i < model->num_counters; ++i)
+ if (ctr[i].enabled)
+ e |= 1 << i;
+ reg.enable = e;
+
+ /* Pre-compute the values to stuff in the hardware registers. */
+ model->reg_setup(&reg, ctr, &sys);
+
+ /* Configure the registers on all cpus. */
+ smp_call_function(model->cpu_setup, &reg, 0, 1);
+ model->cpu_setup(&reg);
+ return 0;
+}
+
+static void
+op_axp_shutdown(void)
+{
+ /* Remove our interrupt handler. We may be removing this module. */
+ perf_irq = save_perf_irq;
+}
+
+static void
+op_axp_cpu_start(void *dummy)
+{
+ wrperfmon(1, reg.enable);
+}
+
+static int
+op_axp_start(void)
+{
+ smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+ op_axp_cpu_start(NULL);
+ return 0;
+}
+
+static inline void
+op_axp_cpu_stop(void *dummy)
+{
+ /* Disable performance monitoring for all counters. */
+ wrperfmon(0, -1);
+}
+
+static void
+op_axp_stop(void)
+{
+ smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+ op_axp_cpu_stop(NULL);
+}
+
+static int
+op_axp_create_files(struct super_block * sb, struct dentry * root)
+{
+ int i;
+
+ for (i = 0; i < model->num_counters; ++i) {
+ struct dentry *dir;
+ char buf[3];
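+ /* Two digits plus a NUL fit every index: the largest model
+ (EV67 ProfileMe) exposes 20 counters, so i stays below 20. */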
+
+ snprintf(buf, sizeof buf, "%d", i);
+ dir = oprofilefs_mkdir(sb, root, buf);
+
+ oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
+ oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
+ oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+ /* Dummies. */
+ oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
+ oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+ oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+ }
+
+ if (model->can_set_proc_mode) {
+ oprofilefs_create_ulong(sb, root, "enable_pal",
+ &sys.enable_pal);
+ oprofilefs_create_ulong(sb, root, "enable_kernel",
+ &sys.enable_kernel);
+ oprofilefs_create_ulong(sb, root, "enable_user",
+ &sys.enable_user);
+ }
+
+ return 0;
+}
+
+int __init
+oprofile_arch_init(struct oprofile_operations *ops)
+{
+ struct op_axp_model *lmodel = NULL;
+
+ switch (implver()) {
+ case IMPLVER_EV4:
+ lmodel = &op_model_ev4;
+ break;
+ case IMPLVER_EV5:
+ /* 21164PC has a slightly different set of events.
+ Recognize the chip by the presence of the MAX insns. */
+ if (!amask(AMASK_MAX))
+ lmodel = &op_model_pca56;
+ else
+ lmodel = &op_model_ev5;
+ break;
+ case IMPLVER_EV6:
+ /* 21264A supports ProfileMe.
+ Recognize the chip by the presence of the CIX insns. */
+ if (!amask(AMASK_CIX))
+ lmodel = &op_model_ev67;
+ else
+ lmodel = &op_model_ev6;
+ break;
+ }
+
+ if (!lmodel)
+ return -ENODEV;
+ model = lmodel;
+
+ ops->create_files = op_axp_create_files;
+ ops->setup = op_axp_setup;
+ ops->shutdown = op_axp_shutdown;
+ ops->start = op_axp_start;
+ ops->stop = op_axp_stop;
+ ops->cpu_type = lmodel->cpu_type;
+
+ printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
+ lmodel->cpu_type);
+
+ return 0;
+}
+
+
+void
+oprofile_arch_exit(void)
+{
+}
diff --git a/arch/alpha/oprofile/op_impl.h b/arch/alpha/oprofile/op_impl.h
new file mode 100644
index 000000000000..6b97893c1a80
--- /dev/null
+++ b/arch/alpha/oprofile/op_impl.h
@@ -0,0 +1,55 @@
+/**
+ * @file arch/alpha/oprofile/op_impl.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ */
+
+#ifndef OP_IMPL_H
+#define OP_IMPL_H 1
+
+/* Per-counter configuration as set via oprofilefs. */
+struct op_counter_config {
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long count;
+ /* Dummies because I am too lazy to hack the userspace tools. */
+ unsigned long kernel;
+ unsigned long user;
+ unsigned long unit_mask;
+};
+
+/* System-wide configuration as set via oprofilefs. */
+struct op_system_config {
+ unsigned long enable_pal;
+ unsigned long enable_kernel;
+ unsigned long enable_user;
+};
+
+/* Cached values for the various performance monitoring registers. */
+struct op_register_config {
+ unsigned long enable;
+ unsigned long mux_select;
+ unsigned long proc_mode;
+ unsigned long freq;
+ unsigned long reset_values;
+ unsigned long need_reset;
+};
+
+/* Per-architecture configury and hooks. */
+struct op_axp_model {
+ void (*reg_setup) (struct op_register_config *,
+ struct op_counter_config *,
+ struct op_system_config *);
+ void (*cpu_setup) (void *);
+ void (*reset_ctr) (struct op_register_config *, unsigned long);
+ void (*handle_interrupt) (unsigned long, struct pt_regs *,
+ struct op_counter_config *);
+ char *cpu_type;
+ unsigned char num_counters;
+ unsigned char can_set_proc_mode;
+};
+
+#endif
diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c
new file mode 100644
index 000000000000..80d764dbf22f
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev4.c
@@ -0,0 +1,116 @@
+/**
+ * @file arch/alpha/oprofile/op_model_ev4.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+#include "op_impl.h"
+
+
+/* Compute all of the registers in preparation for enabling profiling. */
+
+static void
+ev4_reg_setup(struct op_register_config *reg,
+ struct op_counter_config *ctr,
+ struct op_system_config *sys)
+{
+ unsigned long ctl = 0, count, hilo;
+
+ /* Select desired events. We've mapped the event numbers
+ such that they fit directly into the event selection fields.
+
+ Note that there is no "off" setting. In both cases we select
+ the EXTERNAL event source, hoping that it'll be the lowest
+ frequency, and set the frequency counter to LOW. The interrupts
+ for these "disabled" counter overflows are ignored by the
+ interrupt handler.
+
+ This is most irritating, because the hardware *can* enable and
+ disable the interrupts for these counters independently, but the
+ wrperfmon interface doesn't allow it. */
+
+ ctl |= (ctr[0].enabled ? ctr[0].event << 8 : 14 << 8);
+ ctl |= (ctr[1].enabled ? (ctr[1].event - 16) << 32 : 7ul << 32);
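+ /* Counter 0's select field sits at bit 8 and counter 1's at
+ bit 32; the fallback values (14 and 7) are the "disabled"
+ EXTERNAL selections described above. */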
+
+ /* EV4 cannot read or write its counter registers. The only
+ thing one can do at all is see whether an overflow occurred
+ and take the interrupt. We can set the width of the counters,
+ to some extent. Take the interrupt count selected by the user,
+ map it onto one of the possible values, and write it back. */
+
+ count = ctr[0].count;
+ if (count <= 4096)
+ count = 4096, hilo = 1;
+ else
+ count = 65536, hilo = 0;
+ ctr[0].count = count;
+ ctl |= (ctr[0].enabled && hilo) << 3;
+
+ count = ctr[1].count;
+ if (count <= 256)
+ count = 256, hilo = 1;
+ else
+ count = 4096, hilo = 0;
+ ctr[1].count = count;
+ ctl |= (ctr[1].enabled && hilo);
+
+ reg->mux_select = ctl;
+
+ /* Select performance monitoring options. */
+ /* ??? Need to come up with some mechanism to trace only
+ selected processes. EV4 does not have a mechanism to
+ select kernel or user mode only. For now, enable always. */
+ reg->proc_mode = 0;
+
+ /* Frequency is folded into mux_select for EV4. */
+ reg->freq = 0;
+
+ /* See above regarding no writes. */
+ reg->reset_values = 0;
+ reg->need_reset = 0;
+}
+
+/* Program all of the registers in preparation for enabling profiling. */
+
+static void
+ev4_cpu_setup(void *x)
+{
+ struct op_register_config *reg = x;
+
+ wrperfmon(2, reg->mux_select);
+ wrperfmon(3, reg->proc_mode);
+}
+
+static void
+ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
+ struct op_counter_config *ctr)
+{
+ /* EV4 can't properly disable counters individually.
+ Discard "disabled" events now. */
+ if (!ctr[which].enabled)
+ return;
+
+ /* Record the sample. */
+ oprofile_add_sample(regs, which);
+}
+
+
+struct op_axp_model op_model_ev4 = {
+ .reg_setup = ev4_reg_setup,
+ .cpu_setup = ev4_cpu_setup,
+ .reset_ctr = NULL,
+ .handle_interrupt = ev4_handle_interrupt,
+ .cpu_type = "alpha/ev4",
+ .num_counters = 2,
+ .can_set_proc_mode = 0,
+};
diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c
new file mode 100644
index 000000000000..ceea6e1ad79a
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev5.c
@@ -0,0 +1,211 @@
+/**
+ * @file arch/alpha/oprofile/op_model_ev5.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+#include "op_impl.h"
+
+
+/* Compute all of the registers in preparation for enabling profiling.
+
+ The 21164 (EV5) and 21164PC (PCA56) vary in the bit placement and
+ meaning of the "CBOX" events. Given that we don't care about meaning
+ at this point, arrange for the difference in bit placement to be
+ handled by common code. */
+
+static void
+common_reg_setup(struct op_register_config *reg,
+ struct op_counter_config *ctr,
+ struct op_system_config *sys,
+ int cbox1_ofs, int cbox2_ofs)
+{
+ int i, ctl, reset, need_reset;
+
+ /* Select desired events. The event numbers are selected such
+ that they map directly into the event selection fields:
+
+ PCSEL0: 0, 1
+ PCSEL1: 24-39
+ CBOX1: 40-47
+ PCSEL2: 48-63
+ CBOX2: 64-71
+
+ There are two special cases, in that CYCLES can be measured
+ on PCSEL[02], and SCACHE_WRITE can be measured on CBOX[12].
+ These event numbers are canonicalized to their first appearance. */
+
+ ctl = 0;
+ for (i = 0; i < 3; ++i) {
+ unsigned long event = ctr[i].event;
+ if (!ctr[i].enabled)
+ continue;
+
+ /* Remap the duplicate events, as described above. */
+ if (i == 2) {
+ if (event == 0)
+ event = 12+48;
+ else if (event == 2+41)
+ event = 4+65;
+ }
+
+ /* Convert the event numbers onto mux_select bit mask. */
+ if (event < 2)
+ ctl |= event << 31;
+ else if (event < 24)
+ /* error */;
+ else if (event < 40)
+ ctl |= (event - 24) << 4;
+ else if (event < 48)
+ ctl |= (event - 40) << cbox1_ofs | 15 << 4;
+ else if (event < 64)
+ ctl |= event - 48;
+ else if (event < 72)
+ ctl |= (event - 64) << cbox2_ofs | 15;
+ }
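+ /* Worked example: event 42 is CBOX1 event 2, so the 40-47 arm
+ yields ctl |= 2 << cbox1_ofs | 15 << 4; the 15 in the PCSEL1
+ field presumably routes that counter to the CBOX1 input. */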
+ reg->mux_select = ctl;
+
+ /* Select processor mode. */
+ /* ??? Need to come up with some mechanism to trace only selected
+ processes. For now select from pal, kernel and user mode. */
+ ctl = 0;
+ ctl |= !sys->enable_pal << 9;
+ ctl |= !sys->enable_kernel << 8;
+ ctl |= !sys->enable_user << 30;
+ reg->proc_mode = ctl;
+
+ /* Select interrupt frequencies. Take the interrupt count selected
+ by the user, and map it onto one of the possible counter widths.
+ If the user value is in between, compute a value to which the
+ counter is reset at each interrupt. */
+
+ ctl = reset = need_reset = 0;
+ for (i = 0; i < 3; ++i) {
+ unsigned long max, hilo, count = ctr[i].count;
+ if (!ctr[i].enabled)
+ continue;
+
+ if (count <= 256)
+ count = 256, hilo = 3, max = 256;
+ else {
+ max = (i == 2 ? 16384 : 65536);
+ hilo = 2;
+ if (count > max)
+ count = max;
+ }
+ ctr[i].count = count;
+
+ ctl |= hilo << (8 - i*2);
+ reset |= (max - count) << (48 - 16*i);
+ if (count != max)
+ need_reset |= 1 << i;
+ }
+ reg->freq = ctl;
+ reg->reset_values = reset;
+ reg->need_reset = need_reset;
+}
+
+static void
+ev5_reg_setup(struct op_register_config *reg,
+ struct op_counter_config *ctr,
+ struct op_system_config *sys)
+{
+ common_reg_setup(reg, ctr, sys, 19, 22);
+}
+
+static void
+pca56_reg_setup(struct op_register_config *reg,
+ struct op_counter_config *ctr,
+ struct op_system_config *sys)
+{
+ common_reg_setup(reg, ctr, sys, 8, 11);
+}
+
+/* Program all of the registers in preparation for enabling profiling. */
+
+static void
+ev5_cpu_setup (void *x)
+{
+ struct op_register_config *reg = x;
+
+ wrperfmon(2, reg->mux_select);
+ wrperfmon(3, reg->proc_mode);
+ wrperfmon(4, reg->freq);
+ wrperfmon(6, reg->reset_values);
+}
+
+/* CTR is a counter for which the user has requested an interrupt count
+ in between one of the widths selectable in hardware. Reset the count
+ for CTR to the value stored in REG->RESET_VALUES.
+
+ For EV5, this means disabling profiling, reading the current values,
+ masking in the value for the desired register, writing, then turning
+ profiling back on.
+
+ This can be streamlined if profiling is only enabled for user mode.
+ In that case we know that the counters are not currently incrementing
+ (due to being in kernel mode). */
+
+static void
+ev5_reset_ctr(struct op_register_config *reg, unsigned long ctr)
+{
+ unsigned long values, mask, not_pk, reset_values;
+
+ mask = (ctr == 0 ? 0xfffful << 48
+ : ctr == 1 ? 0xfffful << 32
+ : 0x3fff << 16);
+
+ not_pk = 1 << 9 | 1 << 8;
+
+ reset_values = reg->reset_values;
+
+ if ((reg->proc_mode & not_pk) == not_pk) {
+ values = wrperfmon(5, 0);
+ values = (reset_values & mask) | (values & ~mask & -2);
+ wrperfmon(6, values);
+ } else {
+ wrperfmon(0, -1);
+ values = wrperfmon(5, 0);
+ values = (reset_values & mask) | (values & ~mask & -2);
+ wrperfmon(6, values);
+ wrperfmon(1, reg->enable);
+ }
+}
+
+static void
+ev5_handle_interrupt(unsigned long which, struct pt_regs *regs,
+ struct op_counter_config *ctr)
+{
+ /* Record the sample. */
+ oprofile_add_sample(regs, which);
+}
+
+
+struct op_axp_model op_model_ev5 = {
+ .reg_setup = ev5_reg_setup,
+ .cpu_setup = ev5_cpu_setup,
+ .reset_ctr = ev5_reset_ctr,
+ .handle_interrupt = ev5_handle_interrupt,
+ .cpu_type = "alpha/ev5",
+ .num_counters = 3,
+ .can_set_proc_mode = 1,
+};
+
+struct op_axp_model op_model_pca56 = {
+ .reg_setup = pca56_reg_setup,
+ .cpu_setup = ev5_cpu_setup,
+ .reset_ctr = ev5_reset_ctr,
+ .handle_interrupt = ev5_handle_interrupt,
+ .cpu_type = "alpha/pca56",
+ .num_counters = 3,
+ .can_set_proc_mode = 1,
+};
diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c
new file mode 100644
index 000000000000..0869f85f5748
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev6.c
@@ -0,0 +1,103 @@
+/**
+ * @file arch/alpha/oprofile/op_model_ev6.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+#include "op_impl.h"
+
+
+/* Compute all of the registers in preparation for enabling profiling. */
+
+static void
+ev6_reg_setup(struct op_register_config *reg,
+ struct op_counter_config *ctr,
+ struct op_system_config *sys)
+{
+ unsigned long ctl, reset, need_reset, i;
+
+ /* Select desired events. We've mapped the event numbers
+ such that they fit directly into the event selection fields. */
+ ctl = 0;
+ if (ctr[0].enabled && ctr[0].event)
+ ctl |= (ctr[0].event & 1) << 4;
+ if (ctr[1].enabled)
+ ctl |= (ctr[1].event - 2) & 15;
+ reg->mux_select = ctl;
+
+ /* Select logging options. */
+ /* ??? Need to come up with some mechanism to trace only
+ selected processes. EV6 does not have a mechanism to
+ select kernel or user mode only. For now, enable always. */
+ reg->proc_mode = 0;
+
+ /* EV6 cannot change the width of the counters as with the
+ other implementations. But fortunately, we can write to
+ the counters and set the value such that it will overflow
+ at the right time. */
+ reset = need_reset = 0;
+ for (i = 0; i < 2; ++i) {
+ unsigned long count = ctr[i].count;
+ if (!ctr[i].enabled)
+ continue;
+
+ if (count > 0x100000)
+ count = 0x100000;
+ ctr[i].count = count;
+ reset |= (0x100000 - count) << (i ? 6 : 28);
+ if (count != 0x100000)
+ need_reset |= 1 << i;
+ }
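+ /* The counters wrap at 0x100000 (20 bits); preloading max - count
+ makes one overflow after exactly count events. The shifts place
+ the preloads in the PMCTR fields: counter 0 at bit 28, counter 1
+ at bit 6. */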
+ reg->reset_values = reset;
+ reg->need_reset = need_reset;
+}
+
+/* Program all of the registers in preparation for enabling profiling. */
+
+static void
+ev6_cpu_setup (void *x)
+{
+ struct op_register_config *reg = x;
+
+ wrperfmon(2, reg->mux_select);
+ wrperfmon(3, reg->proc_mode);
+ wrperfmon(6, reg->reset_values | 3);
+}
+
+/* CTR is a counter for which the user has requested an interrupt count
+ in between one of the widths selectable in hardware. Reset the count
+ for CTR to the value stored in REG->RESET_VALUES. */
+
+static void
+ev6_reset_ctr(struct op_register_config *reg, unsigned long ctr)
+{
+ wrperfmon(6, reg->reset_values | (1 << ctr));
+}
+
+static void
+ev6_handle_interrupt(unsigned long which, struct pt_regs *regs,
+ struct op_counter_config *ctr)
+{
+ /* Record the sample. */
+ oprofile_add_sample(regs, which);
+}
+
+
+struct op_axp_model op_model_ev6 = {
+ .reg_setup = ev6_reg_setup,
+ .cpu_setup = ev6_cpu_setup,
+ .reset_ctr = ev6_reset_ctr,
+ .handle_interrupt = ev6_handle_interrupt,
+ .cpu_type = "alpha/ev6",
+ .num_counters = 2,
+ .can_set_proc_mode = 0,
+};
diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c
new file mode 100644
index 000000000000..70302086283c
--- /dev/null
+++ b/arch/alpha/oprofile/op_model_ev67.c
@@ -0,0 +1,263 @@
+/**
+ * @file arch/alpha/oprofile/op_model_ev67.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ * @author Falk Hueffner <falk@debian.org>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+
+#include "op_impl.h"
+
+
+/* Compute all of the registers in preparation for enabling profiling. */
+
+static void
+ev67_reg_setup(struct op_register_config *reg,
+ struct op_counter_config *ctr,
+ struct op_system_config *sys)
+{
+ unsigned long ctl, reset, need_reset, i;
+
+ /* Select desired events. */
+ ctl = 1UL << 4; /* Enable ProfileMe mode. */
+
+ /* The event numbers are chosen so we can use them directly if
+ PCTR1 is enabled. */
+ if (ctr[1].enabled) {
+ ctl |= (ctr[1].event & 3) << 2;
+ } else {
+ if (ctr[0].event == 0) /* cycles */
+ ctl |= 1UL << 2;
+ }
+ reg->mux_select = ctl;
+
+ /* Select logging options. */
+ /* ??? Need to come up with some mechanism to trace only
+ selected processes. EV67 does not have a mechanism to
+ select kernel or user mode only. For now, enable always. */
+ reg->proc_mode = 0;
+
+ /* EV67 cannot change the width of the counters as with the
+ other implementations. But fortunately, we can write to
+ the counters and set the value such that it will overflow
+ at the right time. */
+ reset = need_reset = 0;
+ for (i = 0; i < 2; ++i) {
+ unsigned long count = ctr[i].count;
+ if (!ctr[i].enabled)
+ continue;
+
+ if (count > 0x100000)
+ count = 0x100000;
+ ctr[i].count = count;
+ reset |= (0x100000 - count) << (i ? 6 : 28);
+ if (count != 0x100000)
+ need_reset |= 1 << i;
+ }
+ reg->reset_values = reset;
+ reg->need_reset = need_reset;
+}
+
+/* Program all of the registers in preparation for enabling profiling. */
+
+static void
+ev67_cpu_setup (void *x)
+{
+ struct op_register_config *reg = x;
+
+ wrperfmon(2, reg->mux_select);
+ wrperfmon(3, reg->proc_mode);
+ wrperfmon(6, reg->reset_values | 3);
+}
+
+/* CTR is a counter for which the user has requested an interrupt count
+ in between one of the widths selectable in hardware. Reset the count
+ for CTR to the value stored in REG->RESET_VALUES. */
+
+static void
+ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr)
+{
+ wrperfmon(6, reg->reset_values | (1 << ctr));
+}
+
+/* ProfileMe conditions which will show up as counters. We can also
+ detect the following, but it seems unlikely that anybody is
+ interested in counting them:
+ * Reset
+ * MT_FPCR (write to floating point control register)
+ * Arithmetic trap
+ * Dstream Fault
+ * Machine Check (ECC fault, etc.)
+ * OPCDEC (illegal opcode)
+ * Floating point disabled
+ * Differentiate between DTB single/double misses and 3 or 4 level
+ page tables
+ * Istream access violation
+ * Interrupt
+ * Icache Parity Error.
+ * Instruction killed (nop, trapb)
+
+ Unfortunately, there seems to be no way to detect Dcache and Bcache
+ misses; the latter could be approximated by making the counter
+ count Bcache misses, but that is not precise.
+
+ We model this as 20 counters:
+ * PCTR0
+ * PCTR1
+ * 9 ProfileMe events, induced by PCTR0
+ * 9 ProfileMe events, induced by PCTR1
+*/
+
+enum profileme_counters {
+ PM_STALLED, /* Stalled for at least one cycle
+ between the fetch and map stages */
+ PM_TAKEN, /* Conditional branch taken */
+ PM_MISPREDICT, /* Branch caused mispredict trap */
+ PM_ITB_MISS, /* ITB miss */
+ PM_DTB_MISS, /* DTB miss */
+ PM_REPLAY, /* Replay trap */
+ PM_LOAD_STORE, /* Load-store order trap */
+ PM_ICACHE_MISS, /* Icache miss */
+ PM_UNALIGNED, /* Unaligned Load/Store */
+ PM_NUM_COUNTERS
+};
+
+static inline void
+op_add_pm(unsigned long pc, int kern, unsigned long counter,
+ struct op_counter_config *ctr, unsigned long event)
+{
+ unsigned long fake_counter = 2 + event;
+ if (counter == 1)
+ fake_counter += PM_NUM_COUNTERS;
+ if (ctr[fake_counter].enabled)
+ oprofile_add_pc(pc, kern, fake_counter);
+}
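+
+/* The fake counter indices follow the 20-counter model above: 0 and
+   1 are PCTR0/PCTR1 themselves, 2..10 the nine ProfileMe events
+   induced by PCTR0, and 11..19 the same nine induced by PCTR1.  */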
+
+static void
+ev67_handle_interrupt(unsigned long which, struct pt_regs *regs,
+ struct op_counter_config *ctr)
+{
+ unsigned long pmpc, pctr_ctl;
+ int kern = !user_mode(regs);
+ int mispredict = 0;
+ union {
+ unsigned long v;
+ struct {
+ unsigned reserved: 30; /* 0-29 */
+ unsigned overcount: 3; /* 30-32 */
+ unsigned icache_miss: 1; /* 33 */
+ unsigned trap_type: 4; /* 34-37 */
+ unsigned load_store: 1; /* 38 */
+ unsigned trap: 1; /* 39 */
+ unsigned mispredict: 1; /* 40 */
+ } fields;
+ } i_stat;
+
+ enum trap_types {
+ TRAP_REPLAY,
+ TRAP_INVALID0,
+ TRAP_DTB_DOUBLE_MISS_3,
+ TRAP_DTB_DOUBLE_MISS_4,
+ TRAP_FP_DISABLED,
+ TRAP_UNALIGNED,
+ TRAP_DTB_SINGLE_MISS,
+ TRAP_DSTREAM_FAULT,
+ TRAP_OPCDEC,
+ TRAP_INVALID1,
+ TRAP_MACHINE_CHECK,
+ TRAP_INVALID2,
+ TRAP_ARITHMETIC,
+ TRAP_INVALID3,
+ TRAP_MT_FPCR,
+ TRAP_RESET
+ };
+
+ pmpc = wrperfmon(9, 0);
+ /* ??? Don't know how to handle physical-mode PALcode address. */
+ if (pmpc & 1)
+ return;
+ pmpc &= ~2; /* clear reserved bit */
+
+ i_stat.v = wrperfmon(8, 0);
+ if (i_stat.fields.trap) {
+ switch (i_stat.fields.trap_type) {
+ case TRAP_INVALID1:
+ case TRAP_INVALID2:
+ case TRAP_INVALID3:
+ /* Pipeline redirection occurred. PMPC points
+ to PALcode. Recognize ITB miss by PALcode
+ offset address, and get actual PC from
+ EXC_ADDR. */
+ oprofile_add_pc(regs->pc, kern, which);
+ if ((pmpc & ((1 << 15) - 1)) == 581)
+ op_add_pm(regs->pc, kern, which,
+ ctr, PM_ITB_MISS);
+ /* Most other bit and counter values will be
+ those for the first instruction in the
+ fault handler, so we're done. */
+ return;
+ case TRAP_REPLAY:
+ op_add_pm(pmpc, kern, which, ctr,
+ (i_stat.fields.load_store
+ ? PM_LOAD_STORE : PM_REPLAY));
+ break;
+ case TRAP_DTB_DOUBLE_MISS_3:
+ case TRAP_DTB_DOUBLE_MISS_4:
+ case TRAP_DTB_SINGLE_MISS:
+ op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS);
+ break;
+ case TRAP_UNALIGNED:
+ op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED);
+ break;
+ case TRAP_INVALID0:
+ case TRAP_FP_DISABLED:
+ case TRAP_DSTREAM_FAULT:
+ case TRAP_OPCDEC:
+ case TRAP_MACHINE_CHECK:
+ case TRAP_ARITHMETIC:
+ case TRAP_MT_FPCR:
+ case TRAP_RESET:
+ break;
+ }
+
+ /* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR
+ mispredicts do not set this bit but can be
+ recognized by the presence of one of these
+ instructions at the PMPC location with bit 39
+ set. */
+ if (i_stat.fields.mispredict) {
+ mispredict = 1;
+ op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT);
+ }
+ }
+
+ oprofile_add_pc(pmpc, kern, which);
+
+ pctr_ctl = wrperfmon(5, 0);
+ if (pctr_ctl & (1UL << 27))
+ op_add_pm(pmpc, kern, which, ctr, PM_STALLED);
+
+ /* Unfortunately, TAK is undefined on mispredicted branches.
+ ??? It is also undefined for non-cbranch insns, should
+ check that. */
+ if (!mispredict && pctr_ctl & (1UL << 0))
+ op_add_pm(pmpc, kern, which, ctr, PM_TAKEN);
+}
+
+struct op_axp_model op_model_ev67 = {
+ .reg_setup = ev67_reg_setup,
+ .cpu_setup = ev67_cpu_setup,
+ .reset_ctr = ev67_reset_ctr,
+ .handle_interrupt = ev67_handle_interrupt,
+ .cpu_type = "alpha/ev67",
+ .num_counters = 20,
+ .can_set_proc_mode = 0,
+};