Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/ac.c20
-rw-r--r--drivers/acpi/battery.c34
-rw-r--r--drivers/acpi/blacklist.c16
-rw-r--r--drivers/acpi/bus.c91
-rw-r--r--drivers/acpi/glue.c40
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/acpi/pci_bind.c313
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/pci_root.c297
-rw-r--r--drivers/acpi/power.c28
-rw-r--r--drivers/acpi/processor_core.c45
-rw-r--r--drivers/acpi/processor_idle.c47
-rw-r--r--drivers/acpi/scan.c69
-rw-r--r--drivers/acpi/video.c61
-rw-r--r--drivers/acpi/video_detect.c9
-rw-r--r--drivers/amba/bus.c12
-rw-r--r--drivers/ata/Kconfig8
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/pata_at91.c361
-rw-r--r--drivers/ata/sata_fsl.c35
-rw-r--r--drivers/base/firmware_class.c6
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/cciss.c15
-rw-r--r--drivers/block/cciss_cmd.h1
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/block/pktcdvd.c10
-rw-r--r--drivers/block/xsysace.c7
-rw-r--r--drivers/char/Kconfig4
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/bfin_jtag_comm.c30
-rw-r--r--drivers/char/bsr.c42
-rw-r--r--drivers/char/hw_random/intel-rng.c9
-rw-r--r--drivers/char/isicom.c16
-rw-r--r--drivers/char/moxa.c7
-rw-r--r--drivers/char/mxser.c2
-rw-r--r--drivers/char/n_hdlc.c46
-rw-r--r--drivers/char/n_r3964.c26
-rw-r--r--drivers/char/nozomi.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/pty.c154
-rw-r--r--drivers/char/synclink_gt.c72
-rw-r--r--drivers/char/tb0219.c4
-rw-r--r--drivers/char/tty_ldisc.c15
-rw-r--r--drivers/char/tty_port.c2
-rw-r--r--drivers/char/vr41xx_giu.c680
-rw-r--r--drivers/char/vt_ioctl.c3
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/cpufreq/cpufreq.c76
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c49
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c77
-rw-r--r--drivers/crypto/padlock-aes.c138
-rw-r--r--drivers/dma/txx9dmac.c20
-rw-r--r--drivers/edac/amd64_edac.c25
-rw-r--r--drivers/edac/amd64_edac.h2
-rw-r--r--drivers/edac/edac_core.h4
-rw-r--r--drivers/edac/edac_mc_sysfs.c4
-rw-r--r--drivers/edac/mpc85xx_edac.c6
-rw-r--r--drivers/edac/mpc85xx_edac.h1
-rw-r--r--drivers/firewire/Kconfig60
-rw-r--r--drivers/firewire/Makefile4
-rw-r--r--drivers/firewire/core-card.c34
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firewire/core-iso.c35
-rw-r--r--drivers/firewire/core.h90
-rw-r--r--drivers/firewire/net.c1655
-rw-r--r--drivers/firewire/sbp2.c10
-rw-r--r--drivers/gpio/Kconfig6
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/pl061.c20
-rw-r--r--drivers/gpio/vr41xx_giu.c586
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo.h4
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c25
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c25
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c25
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c72
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h39
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c55
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debugfs.c44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c8
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c155
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h81
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c251
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c32
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c30
-rw-r--r--drivers/gpu/drm/i915/intel_display.c623
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1156
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h144
-rw-r--r--drivers/gpu/drm/i915/intel_dp_i2c.c273
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h17
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c16
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c18
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c35
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c418
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c14
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c186
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h1
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c30
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/i2c/busses/Kconfig10
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c6
-rw-r--r--drivers/i2c/busses/i2c-designware.c624
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/ide/cmd64x.c3
-rw-r--r--drivers/ide/cs5520.c1
-rw-r--r--drivers/ide/ide-acpi.c37
-rw-r--r--drivers/ide/ide-cd.c24
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-dma.c21
-rw-r--r--drivers/ide/ide-eh.c2
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-io.c14
-rw-r--r--drivers/ide/ide-ioctls.c3
-rw-r--r--drivers/ide/ide-iops.c4
-rw-r--r--drivers/ide/ide-pm.c30
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/ieee1394/Kconfig19
-rw-r--r--drivers/ieee1394/sbp2.c1
-rw-r--r--drivers/ieee1394/sbp2.h8
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/core/cma.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c20
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c508
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.h13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c13
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--drivers/input/keyboard/Kconfig294
-rw-r--r--drivers/input/keyboard/Makefile33
-rw-r--r--drivers/input/keyboard/gpio_keys.c33
-rw-r--r--drivers/input/keyboard/matrix_keypad.c453
-rw-r--r--drivers/input/misc/cobalt_btns.c4
-rw-r--r--drivers/input/mouse/gpio_mouse.c11
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h28
-rw-r--r--drivers/input/serio/i8042.c7
-rw-r--r--drivers/input/serio/serio.c7
-rw-r--r--drivers/input/tablet/wacom_wac.c6
-rw-r--r--drivers/isdn/gigaset/ev-layer.c44
-rw-r--r--drivers/isdn/gigaset/isocdata.c6
-rw-r--r--drivers/leds/Kconfig14
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-alix2.c7
-rw-r--r--drivers/leds/leds-bd2802.c96
-rw-r--r--drivers/leds/leds-cobalt-raq.c2
-rw-r--r--drivers/leds/leds-gpio.c22
-rw-r--r--drivers/leds/leds-lp3944.c466
-rw-r--r--drivers/leds/leds-pca9532.c58
-rw-r--r--drivers/lguest/lg.h2
-rw-r--r--drivers/lguest/lguest_user.c4
-rw-r--r--drivers/macintosh/macio_asic.c11
-rw-r--r--drivers/md/Kconfig30
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/dm-crypt.c21
-rw-r--r--drivers/md/dm-delay.c26
-rw-r--r--drivers/md/dm-exception-store.c9
-rw-r--r--drivers/md/dm-exception-store.h2
-rw-r--r--drivers/md/dm-io.c14
-rw-r--r--drivers/md/dm-ioctl.c27
-rw-r--r--drivers/md/dm-linear.c15
-rw-r--r--drivers/md/dm-log-userspace-base.c696
-rw-r--r--drivers/md/dm-log-userspace-transfer.c276
-rw-r--r--drivers/md/dm-log-userspace-transfer.h18
-rw-r--r--drivers/md/dm-log.c9
-rw-r--r--drivers/md/dm-mpath.c334
-rw-r--r--drivers/md/dm-path-selector.h8
-rw-r--r--drivers/md/dm-queue-length.c263
-rw-r--r--drivers/md/dm-raid1.c17
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-round-robin.c2
-rw-r--r--drivers/md/dm-service-time.c339
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c11
-rw-r--r--drivers/md/dm-stripe.c33
-rw-r--r--drivers/md/dm-sysfs.c9
-rw-r--r--drivers/md/dm-table.c465
-rw-r--r--drivers/md/dm.c1137
-rw-r--r--drivers/md/dm.h35
-rw-r--r--drivers/md/linear.c4
-rw-r--r--drivers/md/md.c74
-rw-r--r--drivers/md/multipath.c7
-rw-r--r--drivers/md/raid0.c9
-rw-r--r--drivers/md/raid1.c9
-rw-r--r--drivers/md/raid10.c19
-rw-r--r--drivers/md/raid5.c28
-rw-r--r--drivers/media/common/ir-keymaps.c23
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c13
-rw-r--r--drivers/media/dvb/frontends/stv0900.h7
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c100
-rw-r--r--drivers/media/dvb/frontends/stv0900_priv.h2
-rw-r--r--drivers/media/dvb/frontends/stv090x.c11
-rw-r--r--drivers/media/dvb/frontends/tda10048.c1
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c4
-rw-r--r--drivers/media/dvb/ttpci/Kconfig1
-rw-r--r--drivers/media/radio/radio-si470x.c5
-rw-r--r--drivers/media/radio/radio-tea5764.c4
-rw-r--r--drivers/media/video/Kconfig14
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/cx18/cx18-cards.c34
-rw-r--r--drivers/media/video/cx18/cx18-controls.c2
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c160
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c19
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c26
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h3
-rw-r--r--drivers/media/video/cx2341x.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c63
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c11
-rw-r--r--drivers/media/video/cx23885/cx23885.h4
-rw-r--r--drivers/media/video/cx88/cx88-cards.c94
-rw-r--r--drivers/media/video/cx88/cx88-video.c11
-rw-r--r--drivers/media/video/em28xx/Kconfig2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c140
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c32
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c29
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c130
-rw-r--r--drivers/media/video/em28xx/em28xx.h4
-rw-r--r--drivers/media/video/gspca/gspca.c8
-rw-r--r--drivers/media/video/gspca/ov519.c981
-rw-r--r--drivers/media/video/gspca/sonixj.c181
-rw-r--r--drivers/media/video/gspca/stv06xx/Makefile3
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c53
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h15
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c10
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_sensor.h3
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.c453
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.h59
-rw-r--r--drivers/media/video/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/video/mt9m001.c12
-rw-r--r--drivers/media/video/mt9t031.c14
-rw-r--r--drivers/media/video/mt9v011.c431
-rw-r--r--drivers/media/video/mt9v011.h35
-rw-r--r--drivers/media/video/mt9v022.c12
-rw-r--r--drivers/media/video/ov511.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.c14
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c24
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c37
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c60
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.c35
-rw-r--r--drivers/media/video/pxa_camera.c34
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c11
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c12
-rw-r--r--drivers/media/video/soc_camera.c12
-rw-r--r--drivers/media/video/tcm825x.c4
-rw-r--r--drivers/media/video/usbvideo/Kconfig5
-rw-r--r--drivers/media/video/v4l2-common.c181
-rw-r--r--drivers/media/video/vivi.c110
-rw-r--r--drivers/media/video/w9968cf.c35
-rw-r--r--drivers/media/video/zoran/zoran_driver.c14
-rw-r--r--drivers/message/fusion/mptsas.c4
-rw-r--r--drivers/mfd/dm355evm_msp.c3
-rw-r--r--drivers/mfd/ezx-pcap.c4
-rw-r--r--drivers/mfd/sm501.c3
-rw-r--r--drivers/mfd/twl4030-core.c12
-rw-r--r--drivers/misc/sgi-xp/xpnet.c4
-rw-r--r--drivers/mmc/host/Kconfig36
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-of.c3
-rw-r--r--drivers/mmc/host/sdhci-pci.c20
-rw-r--r--drivers/mmc/host/sdhci-s3c.c428
-rw-r--r--drivers/mmc/host/sdhci.c52
-rw-r--r--drivers/mmc/host/sdhci.h6
-rw-r--r--drivers/mmc/host/via-sdmmc.c1362
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c26
-rw-r--r--drivers/mtd/chips/jedec_probe.c13
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c6
-rw-r--r--drivers/mtd/inftlcore.c11
-rw-r--r--drivers/mtd/maps/Kconfig13
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c5
-rw-r--r--drivers/mtd/maps/integrator-flash.c232
-rw-r--r--drivers/mtd/maps/physmap.c40
-rw-r--r--drivers/mtd/maps/physmap_of.c199
-rw-r--r--drivers/mtd/maps/pmcmsp-ramroot.c104
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c22
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c23
-rw-r--r--drivers/mtd/maps/sa1100-flash.c23
-rw-r--r--drivers/mtd/maps/uclinux.c16
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c303
-rw-r--r--drivers/mtd/mtdcore.c47
-rw-r--r--drivers/mtd/mtdpart.c20
-rw-r--r--drivers/mtd/nand/Kconfig24
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c13
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c17
-rw-r--r--drivers/mtd/nand/davinci_nand.c342
-rw-r--r--drivers/mtd/nand/mxc_nand.c66
-rw-r--r--drivers/mtd/nand/nand_base.c3
-rw-r--r--drivers/mtd/nand/nand_ecc.c4
-rw-r--r--drivers/mtd/nand/omap2.c779
-rw-r--r--drivers/mtd/nand/orion_nand.c23
-rw-r--r--drivers/mtd/nand/plat_nand.c19
-rw-r--r--drivers/mtd/nand/s3c2410.c268
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c16
-rw-r--r--drivers/mtd/nftlcore.c16
-rw-r--r--drivers/mtd/onenand/omap2.c4
-rw-r--r--drivers/mtd/onenand/onenand_base.c862
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c14
-rw-r--r--drivers/mtd/onenand/onenand_sim.c81
-rw-r--r--drivers/net/Kconfig12
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/a2065.c12
-rw-r--r--drivers/net/arcnet/arcnet.c26
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/benet/be.h104
-rw-r--r--drivers/net/benet/be_cmds.c368
-rw-r--r--drivers/net/benet/be_cmds.h80
-rw-r--r--drivers/net/benet/be_ethtool.c4
-rw-r--r--drivers/net/benet/be_hw.h12
-rw-r--r--drivers/net/benet/be_main.c360
-rw-r--r--drivers/net/bmac.c7
-rw-r--r--drivers/net/bnx2.c10
-rw-r--r--drivers/net/bnx2x.h2
-rw-r--r--drivers/net/bnx2x_main.c29
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/cnic.c6
-rw-r--r--drivers/net/cnic_if.h2
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c3
-rw-r--r--drivers/net/davinci_emac.c12
-rw-r--r--drivers/net/dl2k.c17
-rw-r--r--drivers/net/e1000/e1000_main.c11
-rw-r--r--drivers/net/e1000e/defines.h3
-rw-r--r--drivers/net/e1000e/hw.h4
-rw-r--r--drivers/net/e1000e/ich8lan.c270
-rw-r--r--drivers/net/e1000e/lib.c6
-rw-r--r--drivers/net/e1000e/netdev.c5
-rw-r--r--drivers/net/e1000e/phy.c12
-rw-r--r--drivers/net/epic100.c5
-rw-r--r--drivers/net/fealnx.c15
-rw-r--r--drivers/net/fec.h12
-rw-r--r--drivers/net/forcedeth.c25
-rw-r--r--drivers/net/fsl_pq_mdio.c8
-rw-r--r--drivers/net/hamachi.c23
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/igb/e1000_82575.c4
-rw-r--r--drivers/net/igb/igb_main.c14
-rw-r--r--drivers/net/irda/bfin_sir.c16
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c88
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/mdio.c4
-rw-r--r--drivers/net/mlx4/en_netdev.c8
-rw-r--r--drivers/net/mlx4/en_rx.c96
-rw-r--r--drivers/net/mlx4/en_tx.c29
-rw-r--r--drivers/net/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/mlx4/mr.c14
-rw-r--r--drivers/net/mv643xx_eth.c7
-rw-r--r--drivers/net/natsemi.c4
-rw-r--r--drivers/net/ne.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h17
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/netxen/netxen_nic_init.c148
-rw-r--r--drivers/net/netxen/netxen_nic_main.c11
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c13
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c15
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1
-rw-r--r--drivers/net/pcnet32.c50
-rw-r--r--drivers/net/phy/phy.c25
-rw-r--r--drivers/net/ppp_async.c1
-rw-r--r--drivers/net/ppp_synctty.c1
-rw-r--r--drivers/net/qla3xxx.c8
-rw-r--r--drivers/net/qlge/qlge.h2
-rw-r--r--drivers/net/qlge/qlge_ethtool.c4
-rw-r--r--drivers/net/qlge/qlge_main.c153
-rw-r--r--drivers/net/qlge/qlge_mpi.c6
-rw-r--r--drivers/net/r6040.c9
-rw-r--r--drivers/net/r8169.c3
-rw-r--r--drivers/net/s6gmac.c1073
-rw-r--r--drivers/net/sh_eth.c9
-rw-r--r--drivers/net/sky2.c2
-rw-r--r--drivers/net/smsc911x.c6
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/sundance.c6
-rw-r--r--drivers/net/tsi108_eth.c8
-rw-r--r--drivers/net/tulip/de2104x.c7
-rw-r--r--drivers/net/tulip/tulip_core.c14
-rw-r--r--drivers/net/tulip/winbond-840.c6
-rw-r--r--drivers/net/tun.c20
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/cdc_ether.c25
-rw-r--r--drivers/net/usb/cdc_subset.c7
-rw-r--r--drivers/net/usb/dm9601.c10
-rw-r--r--drivers/net/usb/net1080.c12
-rw-r--r--drivers/net/usb/pegasus.c29
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/smsc95xx.c10
-rw-r--r--drivers/net/usb/usbnet.c30
-rw-r--r--drivers/net/veth.c41
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wan/hd64570.c3
-rw-r--r--drivers/net/wan/hd64572.c3
-rw-r--r--drivers/net/wan/sbni.c8
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c7
-rw-r--r--drivers/net/wireless/b43/pcmcia.c1
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h1
-rw-r--r--drivers/net/wireless/b43legacy/main.c7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c64
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c49
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c11
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1
-rw-r--r--drivers/net/wireless/p54/p54common.c51
-rw-r--r--drivers/net/wireless/ray_cs.c9
-rw-r--r--drivers/net/wireless/wavelan_cs.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c4
-rw-r--r--drivers/net/yellowfin.c23
-rw-r--r--drivers/oprofile/oprofile_stats.c1
-rw-r--r--drivers/parisc/ccio-dma.c7
-rw-r--r--drivers/parisc/dino.c12
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/eisa_enumerator.c14
-rw-r--r--drivers/parisc/gsc.c4
-rw-r--r--drivers/parisc/gsc.h2
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c41
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/parisc/superio.c6
-rw-r--r--drivers/parport/parport_pc.c39
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c19
-rw-r--r--drivers/pci/bus.c18
-rw-r--r--drivers/pci/dmar.c235
-rw-r--r--drivers/pci/hotplug/Kconfig4
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c40
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c27
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c1
-rw-r--r--drivers/pci/hotplug/cpqphp.h167
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c1100
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c371
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c97
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c599
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c157
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c112
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c31
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c1
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c1
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c1
-rw-r--r--drivers/pci/hotplug/shpchp_core.c1
-rw-r--r--drivers/pci/intel-iommu.c1440
-rw-r--r--drivers/pci/intr_remapping.c168
-rw-r--r--drivers/pci/intr_remapping.h2
-rw-r--r--drivers/pci/iov.c161
-rw-r--r--drivers/pci/iova.c26
-rw-r--r--drivers/pci/msi.c160
-rw-r--r--drivers/pci/msi.h24
-rw-r--r--drivers/pci/pci.c259
-rw-r--r--drivers/pci/pci.h39
-rw-r--r--drivers/pci/pcie/aer/Kconfig15
-rw-r--r--drivers/pci/pcie/aer/Kconfig.debug18
-rw-r--r--drivers/pci/pcie/aer/Makefile3
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c473
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c3
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h6
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c278
-rw-r--r--drivers/pci/pcie/aer/ecrc.c131
-rw-r--r--drivers/pci/pcie/aspm.c787
-rw-r--r--drivers/pci/probe.c11
-rw-r--r--drivers/pci/quirks.c29
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/search.c32
-rw-r--r--drivers/pci/setup-bus.c53
-rw-r--r--drivers/pci/setup-res.c49
-rw-r--r--drivers/pci/slot.c39
-rw-r--r--drivers/pcmcia/tcic.c3
-rw-r--r--drivers/pcmcia/vrc4171_card.c4
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c4
-rw-r--r--drivers/pcmcia/vrc4173_cardu.h2
-rw-r--r--drivers/platform/x86/Kconfig35
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/acerhdf.c602
-rw-r--r--drivers/platform/x86/asus-laptop.c111
-rw-r--r--drivers/platform/x86/asus_acpi.c30
-rw-r--r--drivers/platform/x86/dell-wmi.c56
-rw-r--r--drivers/platform/x86/eeepc-laptop.c500
-rw-r--r--drivers/platform/x86/hp-wmi.c89
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c425
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c46
-rw-r--r--drivers/power/Kconfig8
-rw-r--r--drivers/power/Makefile3
-rw-r--r--drivers/power/da9030_battery.c19
-rw-r--r--drivers/power/ds2760_battery.c42
-rw-r--r--drivers/power/max17040_battery.c309
-rw-r--r--drivers/rtc/rtc-bfin.c30
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-vr41xx.c4
-rw-r--r--drivers/s390/block/dasd.c20
-rw-r--r--drivers/s390/block/dasd_eckd.c10
-rw-r--r--drivers/s390/char/con3215.c22
-rw-r--r--drivers/s390/char/con3270.c13
-rw-r--r--drivers/s390/char/monreader.c6
-rw-r--r--drivers/s390/char/raw3270.c36
-rw-r--r--drivers/s390/char/sclp_con.c7
-rw-r--r--drivers/s390/char/sclp_vt220.c18
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/qdio.h11
-rw-r--r--drivers/s390/cio/qdio_debug.c3
-rw-r--r--drivers/s390/cio/qdio_main.c144
-rw-r--r--drivers/s390/cio/qdio_thinint.c114
-rw-r--r--drivers/s390/crypto/ap_bus.c85
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/atari_NCR5380.c3
-rw-r--r--drivers/scsi/bnx2i/Kconfig2
-rw-r--r--drivers/scsi/cxgb3i/Kbuild2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.c90
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_ddp.h2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c108
-rw-r--r--drivers/scsi/fcoe/fcoe.h24
-rw-r--r--drivers/scsi/fcoe/libfcoe.c94
-rw-r--r--drivers/scsi/fnic/fnic_main.c8
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c7
-rw-r--r--drivers/scsi/hosts.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c36
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/ipr.c138
-rw-r--r--drivers/scsi/ipr.h10
-rw-r--r--drivers/scsi/iscsi_tcp.c14
-rw-r--r--drivers/scsi/libfc/fc_disc.c83
-rw-r--r--drivers/scsi/libfc/fc_exch.c58
-rw-r--r--drivers/scsi/libfc/fc_fcp.c97
-rw-r--r--drivers/scsi/libfc/fc_lport.c156
-rw-r--r--drivers/scsi/libfc/fc_rport.c120
-rw-r--r--drivers/scsi/libiscsi.c161
-rw-r--r--drivers/scsi/libiscsi_tcp.c6
-rw-r--r--drivers/scsi/mac53c94.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c30
-rw-r--r--drivers/scsi/scsi_devinfo.c247
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_priv.h17
-rw-r--r--drivers/scsi/scsi_sysfs.c17
-rw-r--r--drivers/scsi/scsi_transport_fc.c49
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c1
-rw-r--r--drivers/scsi/scsi_transport_sas.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c51
-rw-r--r--drivers/scsi/sd.c74
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sg.c10
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/sun3_NCR5380.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c5
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/serial/8250_pci.c23
-rw-r--r--drivers/serial/Kconfig10
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/bfin_5xx.c5
-rw-r--r--drivers/serial/icom.c3
-rw-r--r--drivers/serial/jsm/jsm_tty.c4
-rw-r--r--drivers/serial/msm_serial.c772
-rw-r--r--drivers/serial/msm_serial.h117
-rw-r--r--drivers/serial/s3c2400.c2
-rw-r--r--drivers/serial/s3c2410.c2
-rw-r--r--drivers/serial/s3c2412.c2
-rw-r--r--drivers/serial/s3c2440.c2
-rw-r--r--drivers/serial/s3c24a0.c2
-rw-r--r--drivers/serial/s3c6400.c2
-rw-r--r--drivers/serial/samsung.c2
-rw-r--r--drivers/serial/samsung.h2
-rw-r--r--drivers/serial/sb1250-duart.c6
-rw-r--r--drivers/serial/serial_txx9.c113
-rw-r--r--drivers/serial/sh-sci.c11
-rw-r--r--drivers/serial/sunhv.c2
-rw-r--r--drivers/serial/timbuart.c50
-rw-r--r--drivers/serial/vr41xx_siu.c2
-rw-r--r--drivers/serial/zs.c6
-rw-r--r--drivers/spi/omap_uwire.c2
-rw-r--r--drivers/spi/spi_bitbang.c24
-rw-r--r--drivers/spi/spidev.c17
-rw-r--r--drivers/ssb/driver_mipscore.c85
-rw-r--r--drivers/ssb/pcmcia.c10
-rw-r--r--drivers/staging/octeon/Makefile1
-rw-r--r--drivers/staging/octeon/ethernet-common.c328
-rw-r--r--drivers/staging/octeon/ethernet-common.h29
-rw-r--r--drivers/staging/octeon/ethernet-defines.h2
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c9
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c9
-rw-r--r--drivers/staging/octeon/ethernet-spi.c1
-rw-r--r--drivers/staging/octeon/ethernet-tx.c62
-rw-r--r--drivers/staging/octeon/ethernet-tx.h25
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c9
-rw-r--r--drivers/staging/octeon/ethernet.c470
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h11
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c29
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/mon/mon_bin.c2
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/uwb/hwa-rc.c2
-rw-r--r--drivers/uwb/wlp/txrx.c2
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/amba-clcd.c7
-rw-r--r--drivers/video/atafb.c8
-rw-r--r--drivers/video/atmel_lcdfb.c3
-rw-r--r--drivers/video/aty/atyfb.h3
-rw-r--r--drivers/video/aty/atyfb_base.c141
-rw-r--r--drivers/video/aty/mach64_accel.c7
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fbmem.c13
-rw-r--r--drivers/video/fsl-diu-fb.c20
-rw-r--r--drivers/video/hitfb.c66
-rw-r--r--drivers/video/i810/i810_main.c5
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c4
-rw-r--r--drivers/video/matrox/matroxfb_Ti3026.c4
-rw-r--r--drivers/video/matrox/matroxfb_base.c4
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c7
-rw-r--r--drivers/video/mx3fb.c22
-rw-r--r--drivers/video/nvidia/nv_setup.c3
-rw-r--r--drivers/video/omap/omapfb_main.c4
-rw-r--r--drivers/video/platinumfb.c2
-rw-r--r--drivers/video/pxafb.c2
-rw-r--r--drivers/video/s3c-fb.c17
-rw-r--r--drivers/video/sh7760fb.c19
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c91
-rw-r--r--drivers/video/sis/sis_main.c3
-rw-r--r--drivers/video/sm501fb.c26
-rw-r--r--drivers/video/stifb.c7
-rw-r--r--drivers/video/w100fb.c4
-rw-r--r--drivers/vlynq/Kconfig2
-rw-r--r--drivers/vlynq/vlynq.c2
-rw-r--r--drivers/w1/slaves/w1_ds2760.c30
-rw-r--r--drivers/w1/slaves/w1_ds2760.h7
-rw-r--r--drivers/watchdog/Kconfig59
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c286
-rw-r--r--drivers/watchdog/coh901327_wdt.c537
-rw-r--r--drivers/watchdog/hpwdt.c26
-rw-r--r--drivers/watchdog/omap_wdt.c7
-rw-r--r--drivers/watchdog/pnx833x_wdt.c282
-rw-r--r--drivers/watchdog/sa1100_wdt.c5
-rw-r--r--drivers/watchdog/stmp3xxx_wdt.c296
-rw-r--r--drivers/watchdog/twl4030_wdt.c272
-rw-r--r--drivers/watchdog/w83627hf_wdt.c5
-rw-r--r--drivers/watchdog/w83697ug_wdt.c4
-rw-r--r--drivers/watchdog/wdrtas.c8
-rw-r--r--drivers/watchdog/wdt_pci.c122
-rw-r--r--drivers/xen/events.c4
691 files changed, 36132 insertions, 10502 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 431f8b439553..7ec7d88c5999 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -266,6 +266,7 @@ config ACPI_DEBUG_FUNC_TRACE
config ACPI_PCI_SLOT
tristate "PCI slot detection driver"
+ depends on SYSFS
default n
help
This driver creates entries in /sys/bus/pci/slots/ for all PCI
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 88e42abf5d88..0df8fcb687d6 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,6 +61,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
static int acpi_ac_resume(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
@@ -72,10 +73,12 @@ static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
.ids = ac_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
.resume = acpi_ac_resume,
+ .notify = acpi_ac_notify,
},
};
@@ -220,16 +223,14 @@ static int acpi_ac_remove_fs(struct acpi_device *device)
Driver Model
-------------------------------------------------------------------------- */
-static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
{
- struct acpi_ac *ac = data;
- struct acpi_device *device = NULL;
+ struct acpi_ac *ac = acpi_driver_data(device);
if (!ac)
return;
- device = ac->device;
switch (event) {
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -253,7 +254,6 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
static int acpi_ac_add(struct acpi_device *device)
{
int result = 0;
- acpi_status status = AE_OK;
struct acpi_ac *ac = NULL;
@@ -286,13 +286,6 @@ static int acpi_ac_add(struct acpi_device *device)
ac->charger.get_property = get_ac_property;
power_supply_register(&ac->device->dev, &ac->charger);
#endif
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY, acpi_ac_notify,
- ac);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
@@ -328,7 +321,6 @@ static int acpi_ac_resume(struct acpi_device *device)
static int acpi_ac_remove(struct acpi_device *device, int type)
{
- acpi_status status = AE_OK;
struct acpi_ac *ac = NULL;
@@ -337,8 +329,6 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
ac = acpi_driver_data(device);
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY, acpi_ac_notify);
#ifdef CONFIG_ACPI_SYSFS_POWER
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0de6312919a..58b4517ce712 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -796,13 +796,12 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
Driver Interface
-------------------------------------------------------------------------- */
-static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_battery_notify(struct acpi_device *device, u32 event)
{
- struct acpi_battery *battery = data;
- struct acpi_device *device;
+ struct acpi_battery *battery = acpi_driver_data(device);
+
if (!battery)
return;
- device = battery->device;
acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery));
@@ -819,7 +818,6 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
- acpi_status status = 0;
struct acpi_battery *battery = NULL;
if (!device)
return -EINVAL;
@@ -834,22 +832,12 @@ static int acpi_battery_add(struct acpi_device *device)
acpi_battery_update(battery);
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
- if (result)
- goto end;
#endif
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- acpi_battery_notify, battery);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Installing notify handler"));
- result = -ENODEV;
- goto end;
- }
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
- device->status.battery_present ? "present" : "absent");
- end:
- if (result) {
+ if (!result) {
+ printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ device->status.battery_present ? "present" : "absent");
+ } else {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
@@ -860,15 +848,11 @@ static int acpi_battery_add(struct acpi_device *device)
static int acpi_battery_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
struct acpi_battery *battery = NULL;
if (!device || !acpi_driver_data(device))
return -EINVAL;
battery = acpi_driver_data(device);
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- acpi_battery_notify);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
@@ -896,10 +880,12 @@ static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
.ids = battery_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_battery_add,
.resume = acpi_battery_resume,
.remove = acpi_battery_remove,
+ .notify = acpi_battery_notify,
},
};
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 09c69806c1fc..f6baa77deefb 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -192,6 +192,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
},
},
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-NS10J_S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-SR290J",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ae862f1798dc..2876fc70c3a9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -450,18 +450,16 @@ int acpi_bus_receive_event(struct acpi_bus_event *event)
Notification Handling
-------------------------------------------------------------------------- */
-static int
-acpi_bus_check_device(struct acpi_device *device, int *status_changed)
+static void acpi_bus_check_device(acpi_handle handle)
{
- acpi_status status = 0;
+ struct acpi_device *device;
+ acpi_status status;
struct acpi_device_status old_status;
-
+ if (acpi_bus_get_device(handle, &device))
+ return;
if (!device)
- return -EINVAL;
-
- if (status_changed)
- *status_changed = 0;
+ return;
old_status = device->status;
@@ -471,22 +469,15 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed)
*/
if (device->parent && !device->parent->status.present) {
device->status = device->parent->status;
- if (STRUCT_TO_INT(old_status) != STRUCT_TO_INT(device->status)) {
- if (status_changed)
- *status_changed = 1;
- }
- return 0;
+ return;
}
status = acpi_bus_get_status(device);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return;
if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status))
- return 0;
-
- if (status_changed)
- *status_changed = 1;
+ return;
/*
* Device Insertion/Removal
@@ -498,33 +489,17 @@ acpi_bus_check_device(struct acpi_device *device, int *status_changed)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n"));
/* TBD: Handle device removal */
}
-
- return 0;
}
-static int acpi_bus_check_scope(struct acpi_device *device)
+static void acpi_bus_check_scope(acpi_handle handle)
{
- int result = 0;
- int status_changed = 0;
-
-
- if (!device)
- return -EINVAL;
-
/* Status Change? */
- result = acpi_bus_check_device(device, &status_changed);
- if (result)
- return result;
-
- if (!status_changed)
- return 0;
+ acpi_bus_check_device(handle);
/*
* TBD: Enumerate child devices within this device's scope and
* run acpi_bus_check_device()'s on them.
*/
-
- return 0;
}
static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
@@ -547,22 +522,19 @@ EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
- int result = 0;
struct acpi_device *device = NULL;
+ struct acpi_driver *driver;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
+ type, handle));
blocking_notifier_call_chain(&acpi_bus_notify_list,
type, (void *)handle);
- if (acpi_bus_get_device(handle, &device))
- return;
-
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received BUS CHECK notification for device [%s]\n",
- device->pnp.bus_id));
- result = acpi_bus_check_scope(device);
+ acpi_bus_check_scope(handle);
/*
* TBD: We'll need to outsource certain events to non-ACPI
* drivers via the device manager (device.c).
@@ -570,10 +542,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_DEVICE_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE CHECK notification for device [%s]\n",
- device->pnp.bus_id));
- result = acpi_bus_check_device(device, NULL);
+ acpi_bus_check_device(handle);
/*
* TBD: We'll need to outsource certain events to non-ACPI
* drivers via the device manager (device.c).
@@ -581,44 +550,26 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_DEVICE_WAKE:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE WAKE notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received EJECT REQUEST notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE CHECK LIGHT notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD: Exactly what does 'light' mean? */
break;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received FREQUENCY MISMATCH notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received BUS MODE MISMATCH notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_POWER_FAULT:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received POWER FAULT notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
@@ -629,7 +580,13 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
}
- return;
+ acpi_bus_get_device(handle, &device);
+ if (device) {
+ driver = device->driver;
+ if (driver && driver->ops.notify &&
+ (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+ driver->ops.notify(device, type);
+ }
}
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8bd2c2a6884d..a8a5c29958c8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -140,46 +140,6 @@ struct device *acpi_get_physical_device(acpi_handle handle)
EXPORT_SYMBOL(acpi_get_physical_device);
-/* ToDo: When a PCI bridge is found, return the PCI device behind the bridge
- * This should work in general, but did not on a Lenovo T61 for the
- * graphics card. But this must be fixed when the PCI device is
- * bound and the kernel device struct is attached to the acpi device
- * Note: A success call will increase reference count by one
- * Do call put_device(dev) on the returned device then
- */
-struct device *acpi_get_physical_pci_device(acpi_handle handle)
-{
- struct device *dev;
- long long device_id;
- acpi_status status;
-
- status =
- acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
-
- if (ACPI_FAILURE(status))
- return NULL;
-
- /* We need to attempt to determine whether the _ADR refers to a
- PCI device or not. There's no terribly good way to do this,
- so the best we can hope for is to assume that there'll never
- be a device in the host bridge */
- if (device_id >= 0x10000) {
- /* It looks like a PCI device. Does it exist? */
- dev = acpi_get_physical_device(handle);
- } else {
- /* It doesn't look like a PCI device. Does its parent
- exist? */
- acpi_handle phandle;
- if (acpi_get_parent(handle, &phandle))
- return NULL;
- dev = acpi_get_physical_device(phandle);
- }
- if (!dev)
- return NULL;
- return dev;
-}
-EXPORT_SYMBOL(acpi_get_physical_pci_device);
-
static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index d916bea729f1..71670719d61a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -79,6 +79,7 @@ static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
+static struct workqueue_struct *kacpi_hotplug_wq;
struct acpi_res_list {
resource_size_t start;
@@ -192,8 +193,10 @@ acpi_status acpi_os_initialize1(void)
{
kacpid_wq = create_singlethread_workqueue("kacpid");
kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
+ kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
+ BUG_ON(!kacpi_hotplug_wq);
return AE_OK;
}
@@ -206,6 +209,7 @@ acpi_status acpi_os_terminate(void)
destroy_workqueue(kacpid_wq);
destroy_workqueue(kacpi_notify_wq);
+ destroy_workqueue(kacpi_hotplug_wq);
return AE_OK;
}
@@ -716,6 +720,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct workqueue_struct *queue;
+ work_func_t func;
int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
@@ -740,15 +745,17 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
- if (!hp) {
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- queue = (type == OSL_NOTIFY_HANDLER) ?
- kacpi_notify_wq : kacpid_wq;
- ret = queue_work(queue, &dpc->work);
- } else {
- INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
- ret = schedule_work(&dpc->work);
- }
+ /*
+ * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
+ * because the hotplug code may call driver .remove() functions,
+ * which invoke flush_scheduled_work/acpi_os_wait_events_complete
+ * to flush these workqueues.
+ */
+ queue = hp ? kacpi_hotplug_wq :
+ (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
+ func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
+ INIT_WORK(&dpc->work, func);
+ ret = queue_work(queue, &dpc->work);
if (!ret) {
printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index bc46de3d967f..a5a77b78a723 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -24,12 +24,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
@@ -38,310 +33,76 @@
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_bind");
-struct acpi_pci_data {
- struct acpi_pci_id id;
- struct pci_bus *bus;
- struct pci_dev *dev;
-};
-
-static int acpi_pci_unbind(struct acpi_device *device);
-
-static void acpi_pci_data_handler(acpi_handle handle, u32 function,
- void *context)
-{
-
- /* TBD: Anything we need to do here? */
-
- return;
-}
-
-/**
- * acpi_get_pci_id
- * ------------------
- * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem)
- * to resolve PCI information for ACPI-PCI devices defined in the namespace.
- * This typically occurs when resolving PCI operation region information.
- */
-acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id)
+static int acpi_pci_unbind(struct acpi_device *device)
{
- int result = 0;
- acpi_status status = AE_OK;
- struct acpi_device *device = NULL;
- struct acpi_pci_data *data = NULL;
-
-
- if (!id)
- return AE_BAD_PARAMETER;
-
- result = acpi_bus_get_device(handle, &device);
- if (result) {
- printk(KERN_ERR PREFIX
- "Invalid ACPI Bus context for device %s\n",
- acpi_device_bid(device));
- return AE_NOT_EXIST;
- }
-
- status = acpi_get_data(handle, acpi_pci_data_handler, (void **)&data);
- if (ACPI_FAILURE(status) || !data) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid ACPI-PCI context for device %s",
- acpi_device_bid(device)));
- return status;
- }
+ struct pci_dev *dev;
- *id = data->id;
+ dev = acpi_get_pci_dev(device->handle);
+ if (!dev || !dev->subordinate)
+ goto out;
- /*
- id->segment = data->id.segment;
- id->bus = data->id.bus;
- id->device = data->id.device;
- id->function = data->id.function;
- */
+ acpi_pci_irq_del_prt(dev->subordinate);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %s has PCI address %04x:%02x:%02x.%d\n",
- acpi_device_bid(device), id->segment, id->bus,
- id->device, id->function));
+ device->ops.bind = NULL;
+ device->ops.unbind = NULL;
- return AE_OK;
+out:
+ pci_dev_put(dev);
+ return 0;
}
-EXPORT_SYMBOL(acpi_get_pci_id);
-
-int acpi_pci_bind(struct acpi_device *device)
+static int acpi_pci_bind(struct acpi_device *device)
{
- int result = 0;
acpi_status status;
- struct acpi_pci_data *data;
- struct acpi_pci_data *pdata;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_handle handle;
+ struct pci_bus *bus;
+ struct pci_dev *dev;
- if (!device || !device->parent)
- return -EINVAL;
-
- data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status)) {
- kfree(data);
- return -ENODEV;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n",
- (char *)buffer.pointer));
-
- /*
- * Segment & Bus
- * -------------
- * These are obtained via the parent device's ACPI-PCI context.
- */
- status = acpi_get_data(device->parent->handle, acpi_pci_data_handler,
- (void **)&pdata);
- if (ACPI_FAILURE(status) || !pdata || !pdata->bus) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid ACPI-PCI context for parent device %s",
- acpi_device_bid(device->parent)));
- result = -ENODEV;
- goto end;
- }
- data->id.segment = pdata->id.segment;
- data->id.bus = pdata->bus->number;
-
- /*
- * Device & Function
- * -----------------
- * These are simply obtained from the device's _ADR method. Note
- * that a value of zero is valid.
- */
- data->id.device = device->pnp.bus_address >> 16;
- data->id.function = device->pnp.bus_address & 0xFFFF;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n",
- data->id.segment, data->id.bus, data->id.device,
- data->id.function));
-
- /*
- * TBD: Support slot devices (e.g. function=0xFFFF).
- */
-
- /*
- * Locate PCI Device
- * -----------------
- * Locate matching device in PCI namespace. If it doesn't exist
- * this typically means that the device isn't currently inserted
- * (e.g. docking station, port replicator, etc.).
- */
- data->dev = pci_get_slot(pdata->bus,
- PCI_DEVFN(data->id.device, data->id.function));
- if (!data->dev) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %04x:%02x:%02x.%d not present in PCI namespace\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function));
- result = -ENODEV;
- goto end;
- }
- if (!data->dev->bus) {
- printk(KERN_ERR PREFIX
- "Device %04x:%02x:%02x.%d has invalid 'bus' field\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function);
- result = -ENODEV;
- goto end;
- }
+ dev = acpi_get_pci_dev(device->handle);
+ if (!dev)
+ return 0;
/*
- * PCI Bridge?
- * -----------
- * If so, set the 'bus' field and install the 'bind' function to
- * facilitate callbacks for all of its children.
+ * Install the 'bind' function to facilitate callbacks for
+ * children of the P2P bridge.
*/
- if (data->dev->subordinate) {
+ if (dev->subordinate) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device %04x:%02x:%02x.%d is a PCI bridge\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function));
- data->bus = data->dev->subordinate;
+ pci_domain_nr(dev->bus), dev->bus->number,
+ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)));
device->ops.bind = acpi_pci_bind;
device->ops.unbind = acpi_pci_unbind;
}
/*
- * Attach ACPI-PCI Context
- * -----------------------
- * Thus binding the ACPI and PCI devices.
- */
- status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to attach ACPI-PCI context to device %s",
- acpi_device_bid(device)));
- result = -ENODEV;
- goto end;
- }
-
- /*
- * PCI Routing Table
- * -----------------
- * Evaluate and parse _PRT, if exists. This code is independent of
- * PCI bridges (above) to allow parsing of _PRT objects within the
- * scope of non-bridge devices. Note that _PRTs within the scope of
- * a PCI bridge assume the bridge's subordinate bus number.
+ * Evaluate and parse _PRT, if exists. This code allows parsing of
+ * _PRT objects within the scope of non-bridge devices. Note that
+ * _PRTs within the scope of a PCI bridge assume the bridge's
+ * subordinate bus number.
*
* TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_SUCCESS(status)) {
- if (data->bus) /* PCI-PCI bridge */
- acpi_pci_irq_add_prt(device->handle, data->id.segment,
- data->bus->number);
- else /* non-bridge PCI device */
- acpi_pci_irq_add_prt(device->handle, data->id.segment,
- data->id.bus);
- }
-
- end:
- kfree(buffer.pointer);
- if (result) {
- pci_dev_put(data->dev);
- kfree(data);
- }
- return result;
-}
-
-static int acpi_pci_unbind(struct acpi_device *device)
-{
- int result = 0;
- acpi_status status;
- struct acpi_pci_data *data;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-
- if (!device || !device->parent)
- return -EINVAL;
-
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ goto out;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n",
- (char *) buffer.pointer));
- kfree(buffer.pointer);
+ if (dev->subordinate)
+ bus = dev->subordinate;
+ else
+ bus = dev->bus;
- status =
- acpi_get_data(device->handle, acpi_pci_data_handler,
- (void **)&data);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
+ acpi_pci_irq_add_prt(device->handle, bus);
- status = acpi_detach_data(device->handle, acpi_pci_data_handler);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to detach data from device %s",
- acpi_device_bid(device)));
- result = -ENODEV;
- goto end;
- }
- if (data->dev->subordinate) {
- acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
- }
- pci_dev_put(data->dev);
- kfree(data);
-
- end:
- return result;
+out:
+ pci_dev_put(dev);
+ return 0;
}
-int
-acpi_pci_bind_root(struct acpi_device *device,
- struct acpi_pci_id *id, struct pci_bus *bus)
+int acpi_pci_bind_root(struct acpi_device *device)
{
- int result = 0;
- acpi_status status;
- struct acpi_pci_data *data = NULL;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- if (!device || !id || !bus) {
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->id = *id;
- data->bus = bus;
device->ops.bind = acpi_pci_bind;
device->ops.unbind = acpi_pci_unbind;
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status)) {
- kfree (data);
- return -ENODEV;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to "
- "%04x:%02x\n", (char *)buffer.pointer,
- id->segment, id->bus));
-
- status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to attach ACPI-PCI context to device %s",
- (char *)buffer.pointer));
- result = -ENODEV;
- goto end;
- }
-
- end:
- kfree(buffer.pointer);
- if (result != 0)
- kfree(data);
-
- return result;
+ return 0;
}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 2faa9e2ac893..b794eb88ab90 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -182,7 +182,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
}
}
-static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
+static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
struct acpi_pci_routing_table *prt)
{
struct acpi_prt_entry *entry;
@@ -196,8 +196,8 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
* 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert
* it here.
*/
- entry->id.segment = segment;
- entry->id.bus = bus;
+ entry->id.segment = pci_domain_nr(bus);
+ entry->id.bus = bus->number;
entry->id.device = (prt->address >> 16) & 0xFFFF;
entry->pin = prt->pin + 1;
@@ -242,7 +242,7 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
return 0;
}
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -271,7 +271,7 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
- acpi_pci_irq_add_entry(handle, segment, bus, entry);
+ acpi_pci_irq_add_entry(handle, bus, entry);
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
@@ -280,16 +280,17 @@ int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
return 0;
}
-void acpi_pci_irq_del_prt(int segment, int bus)
+void acpi_pci_irq_del_prt(struct pci_bus *bus)
{
struct acpi_prt_entry *entry, *tmp;
printk(KERN_DEBUG
"ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
- segment, bus);
+ pci_domain_nr(bus), bus->number);
spin_lock(&acpi_prt_lock);
list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
- if (segment == entry->id.segment && bus == entry->id.bus) {
+ if (pci_domain_nr(bus) == entry->id.segment
+ && bus->number == entry->id.bus) {
list_del(&entry->list);
kfree(entry);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 196f97d00956..55b5b90c2a44 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -63,9 +63,10 @@ static struct acpi_driver acpi_pci_root_driver = {
struct acpi_pci_root {
struct list_head node;
- struct acpi_device * device;
- struct acpi_pci_id id;
+ struct acpi_device *device;
struct pci_bus *bus;
+ u16 segment;
+ u8 bus_nr;
u32 osc_support_set; /* _OSC state of support bits */
u32 osc_control_set; /* _OSC state of control bits */
@@ -82,7 +83,7 @@ static DEFINE_MUTEX(osc_lock);
int acpi_pci_register_driver(struct acpi_pci_driver *driver)
{
int n = 0;
- struct list_head *entry;
+ struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr)
@@ -92,9 +93,7 @@ int acpi_pci_register_driver(struct acpi_pci_driver *driver)
if (!driver->add)
return 0;
- list_for_each(entry, &acpi_pci_roots) {
- struct acpi_pci_root *root;
- root = list_entry(entry, struct acpi_pci_root, node);
+ list_for_each_entry(root, &acpi_pci_roots, node) {
driver->add(root->device->handle);
n++;
}
@@ -106,7 +105,7 @@ EXPORT_SYMBOL(acpi_pci_register_driver);
void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
{
- struct list_head *entry;
+ struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr) {
@@ -120,28 +119,48 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
if (!driver->remove)
return;
- list_for_each(entry, &acpi_pci_roots) {
- struct acpi_pci_root *root;
- root = list_entry(entry, struct acpi_pci_root, node);
+ list_for_each_entry(root, &acpi_pci_roots, node)
driver->remove(root->device->handle);
- }
}
EXPORT_SYMBOL(acpi_pci_unregister_driver);
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
{
- struct acpi_pci_root *tmp;
+ struct acpi_pci_root *root;
- list_for_each_entry(tmp, &acpi_pci_roots, node) {
- if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
- return tmp->device->handle;
- }
+ list_for_each_entry(root, &acpi_pci_roots, node)
+ if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ return root->device->handle;
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
+/**
+ * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
+ * @handle: the ACPI CA node in question.
+ *
+ * Note: we could make this API take a struct acpi_device * instead, but
+ * for now, it's more convenient to operate on an acpi_handle.
+ */
+int acpi_is_root_bridge(acpi_handle handle)
+{
+ int ret;
+ struct acpi_device *device;
+
+ ret = acpi_bus_get_device(handle, &device);
+ if (ret)
+ return 0;
+
+ ret = acpi_match_device_ids(device, root_device_ids);
+ if (ret)
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
+
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
@@ -161,19 +180,22 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
return AE_OK;
}
-static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
+static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
+ unsigned long long *bus)
{
acpi_status status;
+ int busnum;
- *busnum = -1;
+ busnum = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, busnum);
+ get_root_bridge_busnr_callback, &busnum);
if (ACPI_FAILURE(status))
return status;
/* Check if we really get a bus number from _CRS */
- if (*busnum == -1)
+ if (busnum == -1)
return AE_ERROR;
+ *bus = busnum;
return AE_OK;
}
@@ -298,6 +320,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
struct acpi_pci_root *root;
+
list_for_each_entry(root, &acpi_pci_roots, node) {
if (root->device->handle == handle)
return root;
@@ -305,6 +328,87 @@ static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
return NULL;
}
+struct acpi_handle_node {
+ struct list_head node;
+ acpi_handle handle;
+};
+
+/**
+ * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
+ * @handle: the handle in question
+ *
+ * Given an ACPI CA handle, the desired PCI device is located in the
+ * list of PCI devices.
+ *
+ * If the device is found, its reference count is increased and this
+ * function returns a pointer to its data structure. The caller must
+ * decrement the reference count by calling pci_dev_put().
+ * If no device is found, %NULL is returned.
+ */
+struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+ int dev, fn;
+ unsigned long long adr;
+ acpi_status status;
+ acpi_handle phandle;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev = NULL;
+ struct acpi_handle_node *node, *tmp;
+ struct acpi_pci_root *root;
+ LIST_HEAD(device_list);
+
+ /*
+ * Walk up the ACPI CA namespace until we reach a PCI root bridge.
+ */
+ phandle = handle;
+ while (!acpi_is_root_bridge(phandle)) {
+ node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
+ if (!node)
+ goto out;
+
+ INIT_LIST_HEAD(&node->node);
+ node->handle = phandle;
+ list_add(&node->node, &device_list);
+
+ status = acpi_get_parent(phandle, &phandle);
+ if (ACPI_FAILURE(status))
+ goto out;
+ }
+
+ root = acpi_pci_find_root(phandle);
+ if (!root)
+ goto out;
+
+ pbus = root->bus;
+
+ /*
+ * Now, walk back down the PCI device tree until we return to our
+ * original handle. Assumes that everything between the PCI root
+ * bridge and the device we're looking for must be a P2P bridge.
+ */
+ list_for_each_entry(node, &device_list, node) {
+ acpi_handle hnd = node->handle;
+ status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status))
+ goto out;
+ dev = (adr >> 16) & 0xffff;
+ fn = adr & 0xffff;
+
+ pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
+ if (!pdev || hnd == handle)
+ break;
+
+ pbus = pdev->subordinate;
+ pci_dev_put(pdev);
+ }
+out:
+ list_for_each_entry_safe(node, tmp, &device_list, node)
+ kfree(node);
+
+ return pdev;
+}
+EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
+
/**
* acpi_pci_osc_control_set - commit requested control to Firmware
* @handle: acpi_handle for the target ACPI object
@@ -363,31 +467,46 @@ EXPORT_SYMBOL(acpi_pci_osc_control_set);
static int __devinit acpi_pci_root_add(struct acpi_device *device)
{
- int result = 0;
- struct acpi_pci_root *root = NULL;
- struct acpi_pci_root *tmp;
- acpi_status status = AE_OK;
- unsigned long long value = 0;
- acpi_handle handle = NULL;
+ unsigned long long segment, bus;
+ acpi_status status;
+ int result;
+ struct acpi_pci_root *root;
+ acpi_handle handle;
struct acpi_device *child;
u32 flags, base_flags;
+ segment = 0;
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
+ &segment);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
+ return -ENODEV;
+ }
- if (!device)
- return -EINVAL;
+ /* Check _CRS first, then _BBN. If no _BBN, default to zero. */
+ bus = 0;
+ status = try_get_root_bridge_busnr(device->handle, &bus);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_ERR PREFIX
+ "no bus number in _CRS and can't evaluate _BBN\n");
+ return -ENODEV;
+ }
+ }
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
return -ENOMEM;
- INIT_LIST_HEAD(&root->node);
+ INIT_LIST_HEAD(&root->node);
root->device = device;
+ root->segment = segment & 0xFFFF;
+ root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
- device->ops.bind = acpi_pci_bind;
-
/*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
@@ -395,79 +514,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
- /*
- * Segment
- * -------
- * Obtained via _SEG, if exists, otherwise assumed to be zero (0).
- */
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
- &value);
- switch (status) {
- case AE_OK:
- root->id.segment = (u16) value;
- break;
- case AE_NOT_FOUND:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Assuming segment 0 (no _SEG)\n"));
- root->id.segment = 0;
- break;
- default:
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SEG"));
- result = -ENODEV;
- goto end;
- }
-
- /*
- * Bus
- * ---
- * Obtained via _BBN, if exists, otherwise assumed to be zero (0).
- */
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL,
- &value);
- switch (status) {
- case AE_OK:
- root->id.bus = (u16) value;
- break;
- case AE_NOT_FOUND:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming bus 0 (no _BBN)\n"));
- root->id.bus = 0;
- break;
- default:
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BBN"));
- result = -ENODEV;
- goto end;
- }
-
- /* Some systems have wrong _BBN */
- list_for_each_entry(tmp, &acpi_pci_roots, node) {
- if ((tmp->id.segment == root->id.segment)
- && (tmp->id.bus == root->id.bus)) {
- int bus = 0;
- acpi_status status;
-
- printk(KERN_ERR PREFIX
- "Wrong _BBN value, reboot"
- " and use option 'pci=noacpi'\n");
-
- status = try_get_root_bridge_busnr(device->handle, &bus);
- if (ACPI_FAILURE(status))
- break;
- if (bus != root->id.bus) {
- printk(KERN_INFO PREFIX
- "PCI _CRS %d overrides _BBN 0\n", bus);
- root->id.bus = bus;
- }
- break;
- }
- }
- /*
- * Device & Function
- * -----------------
- * Obtained from _ADR (which has already been evaluated for us).
- */
- root->id.device = device->pnp.bus_address >> 16;
- root->id.function = device->pnp.bus_address & 0xFFFF;
-
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
*/
@@ -477,7 +523,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->id.segment, root->id.bus);
+ root->segment, root->bus_nr);
/*
* Scan the Root Bridge
@@ -486,11 +532,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, root->id.segment, root->id.bus);
+ root->bus = pci_acpi_scan_root(device, segment, bus);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->id.segment, root->id.bus);
+ root->segment, root->bus_nr);
result = -ENODEV;
goto end;
}
@@ -500,7 +546,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* -----------------------
* Thus binding the ACPI and PCI devices.
*/
- result = acpi_pci_bind_root(device, &root->id, root->bus);
+ result = acpi_pci_bind_root(device);
if (result)
goto end;
@@ -511,8 +557,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
if (ACPI_SUCCESS(status))
- result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
- root->id.bus);
+ result = acpi_pci_irq_add_prt(device->handle, root->bus);
/*
* Scan and bind all _ADR-Based Devices
@@ -531,42 +576,28 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
- end:
- if (result) {
- if (!list_empty(&root->node))
- list_del(&root->node);
- kfree(root);
- }
+ return 0;
+end:
+ if (!list_empty(&root->node))
+ list_del(&root->node);
+ kfree(root);
return result;
}
static int acpi_pci_root_start(struct acpi_device *device)
{
- struct acpi_pci_root *root;
+ struct acpi_pci_root *root = acpi_driver_data(device);
-
- list_for_each_entry(root, &acpi_pci_roots, node) {
- if (root->device == device) {
- pci_bus_add_devices(root->bus);
- return 0;
- }
- }
- return -ENODEV;
+ pci_bus_add_devices(root->bus);
+ return 0;
}
static int acpi_pci_root_remove(struct acpi_device *device, int type)
{
- struct acpi_pci_root *root = NULL;
-
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- root = acpi_driver_data(device);
+ struct acpi_pci_root *root = acpi_driver_data(device);
kfree(root);
-
return 0;
}
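
A minimal usage sketch for the acpi_get_pci_dev() helper exported above, following the reference-counting rule stated in its kernel-doc (the function and message here are illustrative only):

#include <linux/pci.h>
#include <linux/acpi.h>

static void example_lookup(acpi_handle handle)
{
	struct pci_dev *pdev = acpi_get_pci_dev(handle);

	if (!pdev)
		return;			/* no PCI device behind this handle */

	dev_info(&pdev->dev, "found PCI device for ACPI handle\n");
	pci_dev_put(pdev);		/* drop the reference the helper took */
}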
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 56665a63bf19..d74365d4a6e7 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -194,7 +194,7 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
{
- int result = 0, state;
+ int result = 0;
int found = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
@@ -236,18 +236,6 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
if (ACPI_FAILURE(status))
return -ENODEV;
- if (!acpi_power_nocheck) {
- /*
- * If acpi_power_nocheck is set, it is unnecessary to check
- * the power state after power transition.
- */
- result = acpi_power_get_state(resource->device->handle,
- &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_ON)
- return -ENOEXEC;
- }
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D0;
@@ -258,7 +246,7 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
{
- int result = 0, state;
+ int result = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
struct list_head *node, *next;
@@ -293,18 +281,6 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
if (ACPI_FAILURE(status))
return -ENODEV;
- if (!acpi_power_nocheck) {
- /*
- * If acpi_power_nocheck is set, it is unnecessary to check
- * the power state after power transition.
- */
- result = acpi_power_get_state(handle, &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_OFF)
- return -ENOEXEC;
- }
-
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D3;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 23f0fb84f1c1..84e0f3c07442 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -89,7 +89,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr);
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
- {ACPI_PROCESSOR_HID, 0},
+ {"ACPI0007", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
@@ -596,7 +596,21 @@ static int acpi_processor_get_info(struct acpi_device *device)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No bus mastering arbitration control\n"));
- if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
+ if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
+ /* Declared with "Processor" statement; match ProcessorID */
+ status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Evaluating processor object\n");
+ return -ENODEV;
+ }
+
+ /*
+ * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
+ * >>> 'acpi_get_processor_id(acpi_id, &id)' in
+ * arch/xxx/acpi.c
+ */
+ pr->acpi_id = object.processor.proc_id;
+ } else {
/*
* Declared with "Device" statement; match _UID.
* Note that we don't handle string _UIDs yet.
@@ -611,20 +625,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
device_declaration = 1;
pr->acpi_id = value;
- } else {
- /* Declared with "Processor" statement; match ProcessorID */
- status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Evaluating processor object\n");
- return -ENODEV;
- }
-
- /*
- * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
- * >>> 'acpi_get_processor_id(acpi_id, &id)' in
- * arch/xxx/acpi.c
- */
- pr->acpi_id = object.processor.proc_id;
}
cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);
@@ -649,7 +649,16 @@ static int acpi_processor_get_info(struct acpi_device *device)
return -ENODEV;
}
}
-
+ /*
+ * On some boxes several processors use the same processor bus id,
+ * but they are located in different scopes. For example:
+ * \_SB.SCK0.CPU0
+ * \_SB.SCK1.CPU0
+ * Rename the processor device bus id; the new bus id is generated
+ * in the format "CPU" + CPU ID (e.g. CPU0, CPU1).
+ */
+ sprintf(acpi_device_bid(device), "CPU%X", pr->id);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
pr->acpi_id));
@@ -731,6 +740,8 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
/* _PDC call should be done before doing anything else (if reqd.). */
arch_acpi_processor_init_pdc(pr);
acpi_processor_set_pdc(pr);
+ arch_acpi_processor_cleanup_pdc(pr);
+
#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr);
#endif
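
A standalone illustration of the bus-id renaming format described above (user-space sketch; the buffer size and CPU id are arbitrary example values, while the real code writes into acpi_device_bid(device) with sprintf()):

#include <stdio.h>

int main(void)
{
	char bus_id[8];
	unsigned int pr_id = 1;		/* kernel-assigned logical CPU id */

	snprintf(bus_id, sizeof(bus_id), "CPU%X", pr_id);
	printf("%s\n", bus_id);		/* prints "CPU1" */
	return 0;
}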
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 10a2d913635a..0efa59e7e3af 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -139,7 +139,7 @@ static void acpi_safe_halt(void)
* are affected too. We pick the most conservative approach: we assume
* that the local APIC stops in both C2 and C3.
*/
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
struct acpi_processor_power *pwr = &pr->power;
@@ -162,7 +162,7 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
pr->power.timer_broadcast_on_state = state;
}
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
unsigned long reason;
@@ -173,7 +173,7 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
}
/* Power(C) State timer broadcast control */
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
@@ -190,10 +190,10 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
#else
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
@@ -515,7 +515,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
- static int bm_check_flag;
+ static int bm_check_flag = -1;
+ static int bm_control_flag = -1;
if (!cx->address)
@@ -545,12 +546,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
}
/* All the logic here assumes flags.bm_check is same across all CPUs */
- if (!bm_check_flag) {
+ if (bm_check_flag == -1) {
/* Determine whether bm_check is needed based on CPU */
acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
bm_check_flag = pr->flags.bm_check;
+ bm_control_flag = pr->flags.bm_control;
} else {
pr->flags.bm_check = bm_check_flag;
+ pr->flags.bm_control = bm_control_flag;
}
if (pr->flags.bm_check) {
@@ -614,29 +617,25 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
switch (cx->type) {
case ACPI_STATE_C1:
cx->valid = 1;
- acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C2:
acpi_processor_power_verify_c2(cx);
- if (cx->valid)
- acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
- if (cx->valid)
- acpi_timer_check_state(i, pr, cx);
break;
}
- if (cx->valid)
- tsc_check_state(cx->type);
+ if (!cx->valid)
+ continue;
- if (cx->valid)
- working++;
+ lapic_timer_check_state(i, pr, cx);
+ tsc_check_state(cx->type);
+ working++;
}
- acpi_propagate_timer_broadcast(pr);
+ lapic_timer_propagate_broadcast(pr);
return (working);
}
@@ -839,7 +838,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
return 0;
}
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
@@ -847,7 +846,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_enable();
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
return idle_time;
}
@@ -892,7 +891,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
* Must be done before busmaster disable as we might need to
* access HPET !
*/
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
@@ -914,7 +913,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return idle_time;
}
@@ -981,7 +980,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
* Must be done before busmaster disable as we might need to
* access HPET !
*/
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
/*
@@ -1026,7 +1025,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return idle_time;
}
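
The bm_check/bm_control change above replaces an implicit zero-initialised cache with an explicit -1 "not determined yet" sentinel and caches bm_control alongside bm_check, so a first CPU whose flag is legitimately 0 no longer triggers re-probing on later CPUs. A standalone sketch of that pattern (names and the fake probe are hypothetical):

#include <stdio.h>

static int probe_hw_flag(int cpu)	/* stand-in for the per-CPU probe */
{
	return cpu == 0 ? 0 : 1;	/* pretend CPU0 reports 0 */
}

static int cached_flag = -1;		/* -1 means "not determined yet" */

static int get_flag(int cpu)
{
	if (cached_flag == -1)		/* only the first caller probes */
		cached_flag = probe_hw_flag(cpu);
	return cached_flag;		/* later CPUs reuse the cached result */
}

int main(void)
{
	printf("%d %d\n", get_flag(0), get_flag(1));	/* prints "0 0" */
	return 0;
}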
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8ff510b91d88..781435d7e369 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -95,7 +95,7 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-static int acpi_bus_hot_remove_device(void *context)
+static void acpi_bus_hot_remove_device(void *context)
{
struct acpi_device *device;
acpi_handle handle = context;
@@ -104,10 +104,10 @@ static int acpi_bus_hot_remove_device(void *context)
acpi_status status = AE_OK;
if (acpi_bus_get_device(handle, &device))
- return 0;
+ return;
if (!device)
- return 0;
+ return;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
@@ -115,7 +115,7 @@ static int acpi_bus_hot_remove_device(void *context)
if (acpi_bus_trim(device, 1)) {
printk(KERN_ERR PREFIX
"Removing device failed\n");
- return -1;
+ return;
}
/* power off device */
@@ -142,9 +142,10 @@ static int acpi_bus_hot_remove_device(void *context)
*/
status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ printk(KERN_WARNING PREFIX
+ "Eject device failed\n");
- return 0;
+ return;
}
static ssize_t
@@ -155,7 +156,6 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
acpi_status status;
acpi_object_type type = 0;
struct acpi_device *acpi_device = to_acpi_device(d);
- struct task_struct *task;
if ((!count) || (buf[0] != '1')) {
return -EINVAL;
@@ -172,11 +172,7 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
goto err;
}
- /* remove the device in another thread to fix the deadlock issue */
- task = kthread_run(acpi_bus_hot_remove_device,
- acpi_device->handle, "acpi_hot_remove_device");
- if (IS_ERR(task))
- ret = PTR_ERR(task);
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle);
err:
return ret;
}
@@ -198,12 +194,12 @@ acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *b
int result;
result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
- if(result)
+ if (result)
goto end;
result = sprintf(buf, "%s\n", (char*)path.pointer);
kfree(path.pointer);
- end:
+end:
return result;
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
@@ -217,21 +213,21 @@ static int acpi_device_setup_files(struct acpi_device *dev)
/*
* Devices gotten from FADT don't have a "path" attribute
*/
- if(dev->handle) {
+ if (dev->handle) {
result = device_create_file(&dev->dev, &dev_attr_path);
- if(result)
+ if (result)
goto end;
}
- if(dev->flags.hardware_id) {
+ if (dev->flags.hardware_id) {
result = device_create_file(&dev->dev, &dev_attr_hid);
- if(result)
+ if (result)
goto end;
}
- if (dev->flags.hardware_id || dev->flags.compatible_ids){
+ if (dev->flags.hardware_id || dev->flags.compatible_ids) {
result = device_create_file(&dev->dev, &dev_attr_modalias);
- if(result)
+ if (result)
goto end;
}
@@ -242,7 +238,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
status = acpi_get_handle(dev->handle, "_EJ0", &temp);
if (ACPI_SUCCESS(status))
result = device_create_file(&dev->dev, &dev_attr_eject);
- end:
+end:
return result;
}
@@ -262,9 +258,9 @@ static void acpi_device_remove_files(struct acpi_device *dev)
if (dev->flags.hardware_id || dev->flags.compatible_ids)
device_remove_file(&dev->dev, &dev_attr_modalias);
- if(dev->flags.hardware_id)
+ if (dev->flags.hardware_id)
device_remove_file(&dev->dev, &dev_attr_hid);
- if(dev->handle)
+ if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
/* --------------------------------------------------------------------------
@@ -512,7 +508,7 @@ static int acpi_device_register(struct acpi_device *device,
break;
}
}
- if(!found) {
+ if (!found) {
acpi_device_bus_id = new_bus_id;
strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
acpi_device_bus_id->instance_no = 0;
@@ -530,22 +526,21 @@ static int acpi_device_register(struct acpi_device *device,
if (device->parent)
device->dev.parent = &parent->dev;
device->dev.bus = &acpi_bus_type;
- device_initialize(&device->dev);
device->dev.release = &acpi_device_release;
- result = device_add(&device->dev);
- if(result) {
- dev_err(&device->dev, "Error adding device\n");
+ result = device_register(&device->dev);
+ if (result) {
+ dev_err(&device->dev, "Error registering device\n");
goto end;
}
result = acpi_device_setup_files(device);
- if(result)
+ if (result)
printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
dev_name(&device->dev));
device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
return 0;
- end:
+end:
mutex_lock(&acpi_device_lock);
if (device->parent)
list_del(&device->node);
@@ -577,7 +572,7 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
* @device: the device to add and initialize
* @driver: driver for the device
*
- * Used to initialize a device via its device driver. Called whenever a
+ * Used to initialize a device via its device driver. Called whenever a
* driver is bound to a device. Invokes the driver's add() ops.
*/
static int
@@ -585,7 +580,6 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
{
int result = 0;
-
if (!device || !driver)
return -EINVAL;
@@ -802,7 +796,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
if (!acpi_match_device_ids(device, button_device_ids))
device->wakeup.flags.run_wake = 1;
- end:
+end:
if (ACPI_FAILURE(status))
device->flags.wake_capable = 0;
return 0;
@@ -1070,7 +1064,7 @@ static void acpi_device_set_id(struct acpi_device *device,
break;
}
- /*
+ /*
* \_SB
* ----
* Fix for the system root bus device -- the only root-level device.
@@ -1320,7 +1314,7 @@ acpi_add_single_object(struct acpi_device **child,
device->parent->ops.bind(device);
}
- end:
+end:
if (!result)
*child = device;
else {
@@ -1464,7 +1458,6 @@ acpi_bus_add(struct acpi_device **child,
return result;
}
-
EXPORT_SYMBOL(acpi_bus_add);
int acpi_bus_start(struct acpi_device *device)
@@ -1484,7 +1477,6 @@ int acpi_bus_start(struct acpi_device *device)
}
return result;
}
-
EXPORT_SYMBOL(acpi_bus_start);
int acpi_bus_trim(struct acpi_device *start, int rmdevice)
@@ -1542,7 +1534,6 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice)
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
-
static int acpi_bus_scan_fixed(struct acpi_device *root)
{
int result = 0;
@@ -1610,6 +1601,6 @@ int __init acpi_scan_init(void)
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
- Done:
+Done:
return result;
}
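
The eject path above now defers removal to acpi_os_hotplug_execute() instead of spawning a kthread. A hedged sketch of the same pattern for some other handler (the function bodies and names are hypothetical; only the void (*)(void *) callback signature is taken from the changed acpi_bus_hot_remove_device()):

static void example_removal_work(void *context)
{
	acpi_handle handle = context;

	pr_debug("hot-removing device %p\n", handle);
	/* ... acpi_bus_trim(), power-off, _EJ0 evaluation ... */
}

static void example_request_removal(struct acpi_device *adev)
{
	/* queue the work on the ACPI hotplug workqueue, as above */
	acpi_os_hotplug_execute(example_removal_work, adev->handle);
}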
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1bdfb37377e3..8851315ce858 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -76,6 +76,7 @@ MODULE_LICENSE("GPL");
static int brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
+static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
static int acpi_video_resume(struct acpi_device *device);
@@ -586,6 +587,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
},
},
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "Acer Aspire 7720",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
+ },
+ },
{}
};
@@ -976,6 +985,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
device->backlight->props.max_brightness = device->brightness->count-3;
kfree(name);
+ result = sysfs_create_link(&device->backlight->dev.kobj,
+ &device->dev->dev.kobj, "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Failed to create sysfs link\n");
+
device->cdev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
if (IS_ERR(device->cdev))
@@ -1054,15 +1068,15 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
static int acpi_video_bus_check(struct acpi_video_bus *video)
{
acpi_status status = -ENOENT;
- struct device *dev;
+ struct pci_dev *dev;
if (!video)
return -EINVAL;
- dev = acpi_get_physical_pci_device(video->device->handle);
+ dev = acpi_get_pci_dev(video->device->handle);
if (!dev)
return -ENODEV;
- put_device(dev);
+ pci_dev_put(dev);
/* Since there is no HID, CID and so on for VGA driver, we have
* to check well known required nodes.
@@ -1990,6 +2004,7 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
+ sysfs_remove_link(&device->backlight->dev.kobj, "device");
backlight_device_unregister(device->backlight);
if (device->cdev) {
sysfs_remove_link(&device->dev->dev.kobj,
@@ -2318,6 +2333,13 @@ static int __init intel_opregion_present(void)
int acpi_video_register(void)
{
int result = 0;
+ if (register_count) {
+ /*
+ * If acpi_video_register() has already been called, don't
+ * register the acpi_video_bus again; just return success.
+ */
+ return 0;
+ }
acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
if (!acpi_video_dir)
@@ -2329,10 +2351,35 @@ int acpi_video_register(void)
return -ENODEV;
}
+ /*
+ * When the acpi_video_bus has been loaded successfully,
+ * increase the reference counter.
+ */
+ register_count = 1;
+
return 0;
}
EXPORT_SYMBOL(acpi_video_register);
+void acpi_video_unregister(void)
+{
+ if (!register_count) {
+ /*
+ * If the acpi_video_bus is already unloaded, there is
+ * nothing to do; return directly.
+ */
+ return;
+ }
+ acpi_bus_unregister_driver(&acpi_video_bus);
+
+ remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+
+ register_count = 0;
+
+ return;
+}
+EXPORT_SYMBOL(acpi_video_unregister);
+
/*
* This is kind of nasty. Hardware using Intel chipsets may require
* the video opregion code to be run first in order to initialise
@@ -2350,16 +2397,12 @@ static int __init acpi_video_init(void)
return acpi_video_register();
}
-void acpi_video_exit(void)
+static void __exit acpi_video_exit(void)
{
-
- acpi_bus_unregister_driver(&acpi_video_bus);
-
- remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+ acpi_video_unregister();
return;
}
-EXPORT_SYMBOL(acpi_video_exit);
module_init(acpi_video_init);
module_exit(acpi_video_exit);
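
A minimal sketch of how another module can now pair the exported acpi_video_register()/acpi_video_unregister() calls; thanks to the register_count guard above, registering twice is harmless (module names are hypothetical, and the declaration header is assumed to be the existing ACPI video header):

#include <linux/module.h>

static int __init example_gfx_init(void)
{
	return acpi_video_register();
}

static void __exit example_gfx_exit(void)
{
	acpi_video_unregister();
}

module_init(example_gfx_init);
module_exit(example_gfx_exit);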
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 09737275e25f..7cd2b63435ea 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -10,7 +10,7 @@
* assigned
*
* After PCI devices are glued with ACPI devices
- * acpi_get_physical_pci_device() can be called to identify ACPI graphics
+ * acpi_get_pci_dev() can be called to identify ACPI graphics
* devices for which a real graphics card is plugged in
*
* Now acpi_video_get_capabilities() can be called to check which
@@ -36,6 +36,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
ACPI_MODULE_NAME("video");
#define _COMPONENT ACPI_VIDEO_COMPONENT
@@ -109,7 +110,7 @@ static acpi_status
find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
{
long *cap = context;
- struct device *dev;
+ struct pci_dev *dev;
struct acpi_device *acpi_dev;
const struct acpi_device_id video_ids[] = {
@@ -120,10 +121,10 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (!acpi_match_device_ids(acpi_dev, video_ids)) {
- dev = acpi_get_physical_pci_device(handle);
+ dev = acpi_get_pci_dev(handle);
if (!dev)
return AE_OK;
- put_device(dev);
+ pci_dev_put(dev);
*cap |= acpi_is_video_device(acpi_dev);
}
return AE_OK;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 3d763fdf99b7..246650673010 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -207,6 +207,16 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
void __iomem *tmp;
int i, ret;
+ device_initialize(&dev->dev);
+
+ /*
+ * Copy from device_add
+ */
+ if (dev->dev.init_name) {
+ dev_set_name(&dev->dev, "%s", dev->dev.init_name);
+ dev->dev.init_name = NULL;
+ }
+
dev->dev.release = amba_device_release;
dev->dev.bus = &amba_bustype;
dev->dev.dma_mask = &dev->dma_mask;
@@ -240,7 +250,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
goto err_release;
}
- ret = device_register(&dev->dev);
+ ret = device_add(&dev->dev);
if (ret)
goto err_release;
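
The amba change above relies on the driver-core rule that device_register() is device_initialize() plus device_add(); splitting them lets the bus prepare the struct device (name, resources) before it is added. A hedged sketch of that generic pattern, with a hypothetical container type and simplified error handling:

#include <linux/device.h>

struct example_dev {
	struct device dev;
	/* ... bus-specific fields ... */
};

static int example_register(struct example_dev *edev)
{
	int ret;

	device_initialize(&edev->dev);
	/* ... set name, bus type, release callback, map resources ... */
	ret = device_add(&edev->dev);
	if (ret)
		put_device(&edev->dev);	/* initialized-but-not-added devices are dropped this way */
	return ret;
}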
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2aa1908e5ce0..b17c57f85032 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -679,6 +679,14 @@ config PATA_PLATFORM
If unsure, say N.
+config PATA_AT91
+ tristate "PATA support for AT91SAM9260"
+ depends on ARM && ARCH_AT91
+ help
+ This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
+
+ If unsure, say N.
+
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && PPC_OF
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 1558059874f0..38906f9bbb4e 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_PATA_SCH) += pata_sch.o
obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
+obj-$(CONFIG_PATA_AT91) += pata_at91.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
# Should be last but two libata driver
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ca4d208ddf3b..045a486a09ea 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -125,19 +125,19 @@ MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link spe
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
-MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
-MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
-MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
-MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
@@ -153,11 +153,11 @@ MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
-MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
+MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
-MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
@@ -1993,11 +1993,17 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
* Check if the current speed of the device requires IORDY. Used
* by various controllers for chip configuration.
*/
-
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
- /* Controller doesn't support IORDY. Probably a pointless check
- as the caller should know this */
+ /* Don't set IORDY if we're preparing for reset. IORDY may
+ * lead to controller lock up on certain controllers if the
+ * port is not occupied. See bko#11703 for details.
+ */
+ if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
+ return 0;
+ /* Controller doesn't support IORDY. Probably a pointless
+ * check as the caller should know this.
+ */
if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
return 0;
/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
@@ -2020,7 +2026,6 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
* Compute the highest mode possible if we are not using iordy. Return
* -1 if no iordy mode is available.
*/
-
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
/* If we have no drive specific rule, then PIO 2 is non IORDY */
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
new file mode 100644
index 000000000000..4b27617be26d
--- /dev/null
+++ b/drivers/ata/pata_at91.c
@@ -0,0 +1,361 @@
+/*
+ * PATA driver for AT91SAM9260 Static Memory Controller
+ * with CompactFlash interface in True IDE mode
+ *
+ * Copyright (C) 2009 Matyukevich Sergey
+ *
+ * Based on:
+ * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c
+ * * pata_at32 driver by Kristoffer Nyborg Gregertsen
+ * * at91_ide driver by Stanislaw Gruszka
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+
+#include <mach/at91sam9260_matrix.h>
+#include <mach/at91sam9_smc.h>
+#include <mach/at91sam9260.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+
+
+#define DRV_NAME "pata_at91"
+#define DRV_VERSION "0.1"
+
+#define CF_IDE_OFFSET 0x00c00000
+#define CF_ALT_IDE_OFFSET 0x00e00000
+#define CF_IDE_RES_SIZE 0x08
+
+struct at91_ide_info {
+ unsigned long mode;
+ unsigned int cs;
+
+ void __iomem *ide_addr;
+ void __iomem *alt_addr;
+};
+
+const struct ata_timing initial_timing =
+ {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
+
+static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
+{
+ unsigned long mul;
+
+ /*
+ * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
+ * x * (f / 1_000_000_000) =
+ * x * ((f * 65536) / 1_000_000_000) / 65536 =
+ * x * (((f / 10_000) * 65536) / 100_000) / 65536 =
+ */
+
+ mul = (mck_hz / 10000) << 16;
+ mul /= 100000;
+
+ return (ns * mul + 65536) >> 16; /* rounding */
+}
+
+static void set_smc_mode(struct at91_ide_info *info)
+{
+ at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
+ return;
+}
+
+static void set_smc_timing(struct device *dev,
+ struct at91_ide_info *info, const struct ata_timing *ata)
+{
+ int read_cycle, write_cycle, active, recover;
+ int nrd_setup, nrd_pulse, nrd_recover;
+ int nwe_setup, nwe_pulse;
+
+ int ncs_write_setup, ncs_write_pulse;
+ int ncs_read_setup, ncs_read_pulse;
+
+ unsigned int mck_hz;
+ struct clk *mck;
+
+ read_cycle = ata->cyc8b;
+ nrd_setup = ata->setup;
+ nrd_pulse = ata->act8b;
+ nrd_recover = ata->rec8b;
+
+ mck = clk_get(NULL, "mck");
+ BUG_ON(IS_ERR(mck));
+ mck_hz = clk_get_rate(mck);
+
+ read_cycle = calc_mck_cycles(read_cycle, mck_hz);
+ nrd_setup = calc_mck_cycles(nrd_setup, mck_hz);
+ nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz);
+ nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
+
+ clk_put(mck);
+
+ active = nrd_setup + nrd_pulse;
+ recover = read_cycle - active;
+
+ /* Need at least two cycles recovery */
+ if (recover < 2)
+ read_cycle = active + 2;
+
+ /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
+ ncs_read_setup = 1;
+ ncs_read_pulse = read_cycle - 2;
+
+ /* Write timings same as read timings */
+ write_cycle = read_cycle;
+ nwe_setup = nrd_setup;
+ nwe_pulse = nrd_pulse;
+ ncs_write_setup = ncs_read_setup;
+ ncs_write_pulse = ncs_read_pulse;
+
+ dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n",
+ nrd_setup, nrd_pulse, read_cycle);
+ dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n",
+ nwe_setup, nwe_pulse, write_cycle);
+ dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n",
+ ncs_read_setup, ncs_read_pulse);
+ dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n",
+ ncs_write_setup, ncs_write_pulse);
+
+ at91_sys_write(AT91_SMC_SETUP(info->cs),
+ AT91_SMC_NWESETUP_(nwe_setup) |
+ AT91_SMC_NRDSETUP_(nrd_setup) |
+ AT91_SMC_NCS_WRSETUP_(ncs_write_setup) |
+ AT91_SMC_NCS_RDSETUP_(ncs_read_setup));
+
+ at91_sys_write(AT91_SMC_PULSE(info->cs),
+ AT91_SMC_NWEPULSE_(nwe_pulse) |
+ AT91_SMC_NRDPULSE_(nrd_pulse) |
+ AT91_SMC_NCS_WRPULSE_(ncs_write_pulse) |
+ AT91_SMC_NCS_RDPULSE_(ncs_read_pulse));
+
+ at91_sys_write(AT91_SMC_CYCLE(info->cs),
+ AT91_SMC_NWECYCLE_(write_cycle) |
+ AT91_SMC_NRDCYCLE_(read_cycle));
+
+ return;
+}
+
+static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct at91_ide_info *info = ap->host->private_data;
+ struct ata_timing timing;
+ int ret;
+
+ /* Compute ATA timing and set it to SMC */
+ ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
+ if (ret) {
+ dev_warn(ap->dev, "Failed to compute ATA timing %d, "
+ "set PIO_0 timing\n", ret);
+ set_smc_timing(ap->dev, info, &initial_timing);
+ } else {
+ set_smc_timing(ap->dev, info, &timing);
+ }
+
+ /* Setup SMC mode */
+ set_smc_mode(info);
+
+ return;
+}
+
+static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct at91_ide_info *info = dev->link->ap->host->private_data;
+ unsigned int consumed;
+ unsigned long flags;
+ unsigned int mode;
+
+ local_irq_save(flags);
+ mode = at91_sys_read(AT91_SMC_MODE(info->cs));
+
+ /* set 16bit mode before writing data */
+ at91_sys_write(AT91_SMC_MODE(info->cs),
+ (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16);
+
+ consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+
+ /* restore 8bit mode after data is written */
+ at91_sys_write(AT91_SMC_MODE(info->cs),
+ (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8);
+
+ local_irq_restore(flags);
+ return consumed;
+}
+
+static struct scsi_host_template pata_at91_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_at91_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .sff_data_xfer = pata_at91_data_xfer_noirq,
+ .set_piomode = pata_at91_set_piomode,
+ .cable_detect = ata_cable_40wire,
+ .port_start = ATA_OP_NULL,
+};
+
+static int __devinit pata_at91_probe(struct platform_device *pdev)
+{
+ struct at91_cf_data *board = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct at91_ide_info *info;
+ struct resource *mem_res;
+ struct ata_host *host;
+ struct ata_port *ap;
+ int irq_flags = 0;
+ int irq = 0;
+ int ret;
+
+ /* get platform resources: IO/CTL memories and irq/rst pins */
+
+ if (pdev->num_resources != 1) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem_res) {
+ dev_err(dev, "failed to get mem resource\n");
+ return -EINVAL;
+ }
+
+ irq = board->irq_pin;
+
+ /* init ata host */
+
+ host = ata_host_alloc(dev, 1);
+
+ if (!host)
+ return -ENOMEM;
+
+ ap = host->ports[0];
+ ap->ops = &pata_at91_port_ops;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->pio_mask = ATA_PIO4;
+
+ if (!irq) {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (!info) {
+ dev_err(dev, "failed to allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ info->cs = board->chipselect;
+ info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
+ AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT |
+ AT91_SMC_DBW_8 | AT91_SMC_TDF_(0);
+
+ info->ide_addr = devm_ioremap(dev,
+ mem_res->start + CF_IDE_OFFSET, CF_IDE_RES_SIZE);
+
+ if (!info->ide_addr) {
+ dev_err(dev, "failed to map IO base\n");
+ ret = -ENOMEM;
+ goto err_ide_ioremap;
+ }
+
+ info->alt_addr = devm_ioremap(dev,
+ mem_res->start + CF_ALT_IDE_OFFSET, CF_IDE_RES_SIZE);
+
+ if (!info->alt_addr) {
+ dev_err(dev, "failed to map CTL base\n");
+ ret = -ENOMEM;
+ goto err_alt_ioremap;
+ }
+
+ ap->ioaddr.cmd_addr = info->ide_addr;
+ ap->ioaddr.ctl_addr = info->alt_addr + 0x06;
+ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ata_port_desc(ap, "mmio cmd 0x%llx ctl 0x%llx",
+ (unsigned long long)mem_res->start + CF_IDE_OFFSET,
+ (unsigned long long)mem_res->start + CF_ALT_IDE_OFFSET);
+
+ host->private_data = info;
+
+ return ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
+ irq ? ata_sff_interrupt : NULL,
+ irq_flags, &pata_at91_sht);
+
+err_alt_ioremap:
+ devm_iounmap(dev, info->ide_addr);
+
+err_ide_ioremap:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit pata_at91_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct at91_ide_info *info = host->private_data;
+ struct device *dev = &pdev->dev;
+
+ if (!host)
+ return 0;
+
+ ata_host_detach(host);
+
+ if (!info)
+ return 0;
+
+ devm_iounmap(dev, info->ide_addr);
+ devm_iounmap(dev, info->alt_addr);
+
+ kfree(info);
+ return 0;
+}
+
+static struct platform_driver pata_at91_driver = {
+ .probe = pata_at91_probe,
+ .remove = __devexit_p(pata_at91_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pata_at91_init(void)
+{
+ return platform_driver_register(&pata_at91_driver);
+}
+
+static void __exit pata_at91_exit(void)
+{
+ platform_driver_unregister(&pata_at91_driver);
+}
+
+
+module_init(pata_at91_init);
+module_exit(pata_at91_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
+MODULE_AUTHOR("Matyukevich Sergey");
+MODULE_VERSION(DRV_VERSION);
+
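
A standalone, user-space replica of the calc_mck_cycles() fixed-point conversion used by the new driver, to make the arithmetic concrete (the clock rate and timing value below are example numbers only):

#include <stdio.h>

static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
{
	unsigned long mul = (mck_hz / 10000) << 16;	/* (f / 10^4) * 65536 */

	mul /= 100000;			/* now mul ~= f / 10^9 in 16.16 fixed point */
	return (ns * mul + 65536) >> 16;	/* scale, add one cycle of margin, truncate */
}

int main(void)
{
	/* 290 ns at a 100 MHz master clock comes out as 29 cycles */
	printf("%u\n", calc_mck_cycles(290, 100000000));
	return 0;
}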
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 36b8629203be..94eaa432c40a 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1378,6 +1378,37 @@ static int sata_fsl_remove(struct of_device *ofdev)
return 0;
}
+#ifdef CONFIG_PM
+static int sata_fsl_suspend(struct of_device *op, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ return ata_host_suspend(host, state);
+}
+
+static int sata_fsl_resume(struct of_device *op)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+ int ret;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ struct ata_port *ap = host->ports[0];
+ struct sata_fsl_port_priv *pp = ap->private_data;
+
+ ret = sata_fsl_init_controller(host);
+ if (ret) {
+ dev_printk(KERN_ERR, &op->dev,
+ "Error initializing hardware\n");
+ return ret;
+ }
+
+ /* Recover the CHBA register in the host controller cmd register set */
+ iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static struct of_device_id fsl_sata_match[] = {
{
.compatible = "fsl,pq-sata",
@@ -1392,6 +1423,10 @@ static struct of_platform_driver fsl_sata_driver = {
.match_table = fsl_sata_match,
.probe = sata_fsl_probe,
.remove = sata_fsl_remove,
+#ifdef CONFIG_PM
+ .suspend = sata_fsl_suspend,
+ .resume = sata_fsl_resume,
+#endif
};
static int __init sata_fsl_init(void)
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index ddeb819c8f87..fc466531260e 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -357,7 +357,7 @@ static void fw_dev_release(struct device *dev)
kfree(fw_priv->pages);
kfree(fw_priv->fw_id);
kfree(fw_priv);
- put_device(dev);
+ kfree(dev);
module_put(THIS_MODULE);
}
@@ -408,13 +408,11 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
if (retval) {
dev_err(device, "%s: device_register failed\n", __func__);
put_device(f_dev);
- goto error_kfree_fw_id;
+ return retval;
}
*dev_p = f_dev;
return 0;
-error_kfree_fw_id:
- kfree(fw_priv->fw_id);
error_kfree:
kfree(f_dev);
kfree(fw_priv);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fae725458981..58a3e572f2c9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -762,6 +762,7 @@ static int dpm_prepare(pm_message_t state)
dev->power.status = DPM_ON;
if (error == -EAGAIN) {
put_device(dev);
+ error = 0;
continue;
}
printk(KERN_ERR "PM: Failed to prepare device %s "
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 9c6e5b0fe894..2f07b7c99a95 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1645,7 +1645,7 @@ static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
- printk(KERN_INFO "FD: probing units\n" KERN_INFO "found ");
+ printk(KERN_INFO "FD: probing units\nfound ");
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c7a527c08a09..65a0655e7fc8 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -226,8 +226,18 @@ static inline void addQ(struct hlist_head *list, CommandList_struct *c)
static inline void removeQ(CommandList_struct *c)
{
- if (WARN_ON(hlist_unhashed(&c->list)))
+ /*
+ * After kexec/dump some commands might still
+ * be in flight, which the firmware will try
+ * to complete. Resetting the firmware doesn't work
+ * with old fw revisions, so we have to mark
+ * them off as 'stale' to prevent the driver from
+ * falling over.
+ */
+ if (WARN_ON(hlist_unhashed(&c->list))) {
+ c->cmd_type = CMD_MSG_STALE;
return;
+ }
hlist_del_init(&c->list);
}
@@ -4246,7 +4256,8 @@ static void fail_all_cmds(unsigned long ctlr)
while (!hlist_empty(&h->cmpQ)) {
c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
removeQ(c);
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ if (c->cmd_type != CMD_MSG_STALE)
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
if (c->cmd_type == CMD_RWREQ) {
complete_command(h, c, 0);
} else if (c->cmd_type == CMD_IOCTL_PEND)
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index cd665b00c7c5..dbaed1ea0da3 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -274,6 +274,7 @@ typedef struct _ErrorInfo_struct {
#define CMD_SCSI 0x03
#define CMD_MSG_DONE 0x04
#define CMD_MSG_TIMEOUT 0x05
+#define CMD_MSG_STALE 0xff
/* This structure needs to be divisible by 8 for new
* indexing method.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 862b40c90181..91b753013780 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
- LOCK_FDC(drive, 1);
+ if (lock_fdc(drive, 1)) {
+ mutex_unlock(&open_lock);
+ return -EINTR;
+ }
floppy_type[type] = *g;
floppy_type[type].name = "user format";
for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 83650e00632d..99a506f619b7 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1372,8 +1372,10 @@ try_next_bio:
wakeup = (pd->write_congestion_on > 0
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
- if (wakeup)
- clear_bdi_congested(&pd->disk->queue->backing_dev_info, WRITE);
+ if (wakeup) {
+ clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+ BLK_RW_ASYNC);
+ }
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt_set_state(pkt, PACKET_WAITING_STATE);
@@ -2592,10 +2594,10 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(&q->backing_dev_info, WRITE);
+ set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
- congestion_wait(WRITE, HZ);
+ congestion_wait(BLK_RW_ASYNC, HZ);
spin_lock(&pd->lock);
} while(pd->bio_queue_size > pd->write_congestion_off);
}
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index f08491a3a813..b20abe102a2b 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -390,9 +390,10 @@ static inline void ace_dump_mem(void *base, int len)
static void ace_dump_regs(struct ace_device *ace)
{
- dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
- KERN_INFO " status:%.8x mpu_lba:%.8x busmode:%4x\n"
- KERN_INFO " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
+ dev_info(ace->dev,
+ " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
+ " status:%.8x mpu_lba:%.8x busmode:%4x\n"
+ " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
ace_in32(ace, ACE_CTRL),
ace_in(ace, ACE_SECCNTCMD),
ace_in(ace, ACE_VERSION),
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0bd01f49cfd8..6a06913b01d3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1029,10 +1029,6 @@ config CS5535_GPIO
If compiled as a module, it will be called cs5535_gpio.
-config GPIO_VR41XX
- tristate "NEC VR4100 series General-purpose I/O Unit support"
- depends on CPU_VR41XX
-
config RAW_DRIVER
tristate "RAW driver (/dev/raw/rawN)"
depends on BLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 189efcff08ce..66f779ad4f4c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -95,7 +95,6 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
-obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index 44c113d56045..1d7c34c73b20 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -8,6 +8,10 @@
* Licensed under the GPL-2 or later.
*/
+#define DRV_NAME "bfin-jtag-comm"
+#define DEV_NAME "ttyBFJC"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
#include <linux/circ_buf.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -22,18 +26,14 @@
#include <linux/tty_flip.h>
#include <asm/atomic.h>
+#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
/* See the Debug/Emulation chapter in the HRM */
#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
-#define DRV_NAME "bfin-jtag-comm"
-#define DEV_NAME "ttyBFJC"
-
-#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
-#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
-
static inline uint32_t bfin_write_emudat(uint32_t emudat)
{
__asm__ __volatile__("emudat = %0;" : : "d"(emudat));
@@ -74,7 +74,7 @@ bfin_jc_emudat_manager(void *arg)
while (!kthread_should_stop()) {
/* no one left to give data to, so sleep */
if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for readers\n");
+ pr_debug("waiting for readers\n");
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -82,7 +82,7 @@ bfin_jc_emudat_manager(void *arg)
/* no data available, so just chill */
if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for data (in_len = %i) (circ: %i %i)\n",
+ pr_debug("waiting for data (in_len = %i) (circ: %i %i)\n",
inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
if (inbound_len)
schedule();
@@ -99,11 +99,11 @@ bfin_jc_emudat_manager(void *arg)
if (tty != NULL) {
uint32_t emudat = bfin_read_emudat();
if (inbound_len == 0) {
- debug("incoming length: 0x%08x\n", emudat);
+ pr_debug("incoming length: 0x%08x\n", emudat);
inbound_len = emudat;
} else {
size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
- debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
inbound_len -= num_chars;
tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
tty_flip_buffer_push(tty);
@@ -117,7 +117,7 @@ bfin_jc_emudat_manager(void *arg)
if (outbound_len == 0) {
outbound_len = circ_cnt(&bfin_jc_write_buf);
bfin_write_emudat(outbound_len);
- debug("outgoing length: 0x%08x\n", outbound_len);
+ pr_debug("outgoing length: 0x%08x\n", outbound_len);
} else {
struct tty_struct *tty;
int tail = bfin_jc_write_buf.tail;
@@ -136,7 +136,7 @@ bfin_jc_emudat_manager(void *arg)
if (tty)
tty_wakeup(tty);
mutex_unlock(&bfin_jc_tty_mutex);
- debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
+ pr_debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
}
}
}
@@ -149,7 +149,7 @@ static int
bfin_jc_open(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("open %lu\n", bfin_jc_count);
+ pr_debug("open %lu\n", bfin_jc_count);
++bfin_jc_count;
bfin_jc_tty = tty;
wake_up_process(bfin_jc_kthread);
@@ -161,7 +161,7 @@ static void
bfin_jc_close(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("close %lu\n", bfin_jc_count);
+ pr_debug("close %lu\n", bfin_jc_count);
if (--bfin_jc_count == 0)
bfin_jc_tty = NULL;
wake_up_process(bfin_jc_kthread);
@@ -174,7 +174,7 @@ bfin_jc_circ_write(const unsigned char *buf, int count)
{
int i;
count = min(count, circ_free(&bfin_jc_write_buf));
- debug("going to write chunk of %i bytes\n", count);
+ pr_debug("going to write chunk of %i bytes\n", count);
for (i = 0; i < count; ++i)
circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
bfin_jc_write_buf.head += i;
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 140ea10ecb88..c02db01f736e 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -27,6 +27,7 @@
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
+#include <asm/pgtable.h>
#include <asm/io.h>
/*
@@ -75,12 +76,13 @@ static struct class *bsr_class;
static int bsr_major;
enum {
- BSR_8 = 0,
- BSR_16 = 1,
- BSR_64 = 2,
- BSR_128 = 3,
- BSR_UNKNOWN = 4,
- BSR_MAX = 5,
+ BSR_8 = 0,
+ BSR_16 = 1,
+ BSR_64 = 2,
+ BSR_128 = 3,
+ BSR_4096 = 4,
+ BSR_UNKNOWN = 5,
+ BSR_MAX = 6,
};
static unsigned bsr_types[BSR_MAX];
@@ -117,15 +119,22 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start;
struct bsr_dev *dev = filp->private_data;
+ int ret;
- if (size > dev->bsr_len || (size & (PAGE_SIZE-1)))
- return -EINVAL;
-
- vma->vm_flags |= (VM_IO | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT,
- size, vma->vm_page_prot))
+ /* check for the case of a small BSR device and map one 4k page for it */
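+ /* remap_4k_pfn() installs a single 4K mapping even when the kernel page size is 64K */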
+ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+ ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+ vma->vm_page_prot);
+ else if (size <= dev->bsr_len)
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ dev->bsr_addr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ else
+ return -EINVAL;
+
+ if (ret)
return -EAGAIN;
return 0;
@@ -205,6 +214,11 @@ static int bsr_add_node(struct device_node *bn)
cur->bsr_stride = bsr_stride[i];
cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
+ /* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
+ /* we can only map 4k of it, so only advertise the 4k in sysfs */
+ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+ cur->bsr_len = 4096;
+
switch(cur->bsr_bytes) {
case 8:
cur->bsr_type = BSR_8;
@@ -218,9 +232,11 @@ static int bsr_add_node(struct device_node *bn)
case 128:
cur->bsr_type = BSR_128;
break;
+ case 4096:
+ cur->bsr_type = BSR_4096;
+ break;
default:
cur->bsr_type = BSR_UNKNOWN;
- printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes);
}
cur->bsr_num = bsr_types[cur->bsr_type];
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 5dcbe603eca2..91b53eb1c053 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -305,10 +305,11 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
(BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
== BIOS_CNTL_LOCK_ENABLE_MASK) {
static __initdata /*const*/ char warning[] =
- KERN_WARNING PFX "Firmware space is locked read-only. If you can't or\n"
- KERN_WARNING PFX "don't want to disable this in firmware setup, and if\n"
- KERN_WARNING PFX "you are certain that your system has a functional\n"
- KERN_WARNING PFX "RNG, try using the 'no_fwh_detect' option.\n";
+ KERN_WARNING
+PFX "Firmware space is locked read-only. If you can't or\n"
+PFX "don't want to disable this in firmware setup, and if\n"
+PFX "you are certain that your system has a functional\n"
+PFX "RNG, try using the 'no_fwh_detect' option.\n";
if (no_fwh_detect)
return -ENODEV;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 4159292e35cf..621d1184673c 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1478,10 +1478,10 @@ static int __devinit load_firmware(struct pci_dev *pdev,
status = inw(base + 0x4);
if (status != 0) {
dev_warn(&pdev->dev, "Card%d rejected load header:\n"
- KERN_WARNING "Address:0x%x\n"
- KERN_WARNING "Count:0x%x\n"
- KERN_WARNING "Status:0x%x\n",
- index + 1, frame->addr, frame->count, status);
+ "Address:0x%x\n"
+ "Count:0x%x\n"
+ "Status:0x%x\n",
+ index + 1, frame->addr, frame->count, status);
goto errrelfw;
}
outsw(base, frame->data, word_count);
@@ -1526,10 +1526,10 @@ static int __devinit load_firmware(struct pci_dev *pdev,
status = inw(base + 0x4);
if (status != 0) {
dev_warn(&pdev->dev, "Card%d rejected verify header:\n"
- KERN_WARNING "Address:0x%x\n"
- KERN_WARNING "Count:0x%x\n"
- KERN_WARNING "Status: 0x%x\n",
- index + 1, frame->addr, frame->count, status);
+ "Address:0x%x\n"
+ "Count:0x%x\n"
+ "Status: 0x%x\n",
+ index + 1, frame->addr, frame->count, status);
goto errrelfw;
}
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 6799588b0099..65b6ff2442c6 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1189,11 +1189,6 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
return -ENODEV;
}
- if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
- retval = -ENODEV;
- goto out_unlock;
- }
-
ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
ch->port.count++;
tty->driver_data = ch;
@@ -1218,8 +1213,8 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
moxa_close_port(tty);
} else
ch->port.flags |= ASYNC_NORMAL_ACTIVE;
-out_unlock:
mutex_unlock(&moxa_openlock);
+
return retval;
}
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 9533f43a30bb..52d953eb30c3 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1048,8 +1048,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
if (retval)
return retval;
- /* unmark here for very high baud rate (ex. 921600 bps) used */
- tty->low_latency = 1;
return 0;
}
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 461ece591a5b..1c43c8cdee25 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -10,7 +10,6 @@
* Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
*
* Original release 01/11/99
- * $Id: n_hdlc.c,v 4.8 2003/05/06 21:18:51 paulkf Exp $
*
* This code is released under the GNU General Public License (GPL)
*
@@ -79,7 +78,6 @@
*/
#define HDLC_MAGIC 0x239e
-#define HDLC_VERSION "$Revision: 4.8 $"
#include <linux/module.h>
#include <linux/init.h>
@@ -114,7 +112,7 @@
#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
-#define DEFAULT_TX_BUF_COUNT 1
+#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
struct n_hdlc_buf *link;
@@ -199,6 +197,31 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty);
#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data))
#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty)
+static void flush_rx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
+}
+
+static void flush_tx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+ unsigned long flags;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
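+ /* also reclaim the buffer currently held in tbuf, under the list lock */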
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+ if (n_hdlc->tbuf) {
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+ n_hdlc->tbuf = NULL;
+ }
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+}
+
static struct tty_ldisc_ops n_hdlc_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
@@ -211,6 +234,7 @@ static struct tty_ldisc_ops n_hdlc_ldisc = {
.poll = n_hdlc_tty_poll,
.receive_buf = n_hdlc_tty_receive,
.write_wakeup = n_hdlc_tty_wakeup,
+ .flush_buffer = flush_rx_queue,
};
/**
@@ -341,10 +365,7 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
#endif
- /* Flush any pending characters in the driver and discipline. */
- if (tty->ldisc->ops->flush_buffer)
- tty->ldisc->ops->flush_buffer(tty);
-
+ /* flush receive data from driver */
tty_driver_flush_buffer(tty);
if (debuglevel >= DEBUG_LEVEL_INFO)
@@ -763,6 +784,14 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
error = put_user(count, (int __user *)arg);
break;
+ case TCFLSH:
+ switch (arg) {
+ case TCIOFLUSH:
+ case TCOFLUSH:
+ flush_tx_queue(tty);
+ }
+ /* fall through to default */
+
default:
error = n_tty_ioctl_helper(tty, file, cmd, arg);
break;
@@ -919,8 +948,7 @@ static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
} /* end of n_hdlc_buf_get() */
static char hdlc_banner[] __initdata =
- KERN_INFO "HDLC line discipline: version " HDLC_VERSION
- ", maxframe=%u\n";
+ KERN_INFO "HDLC line discipline maxframe=%u\n";
static char hdlc_register_ok[] __initdata =
KERN_INFO "N_HDLC line discipline registered.\n";
static char hdlc_register_fail[] __initdata =
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index d2e93e343226..2e99158ebb8a 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -1062,7 +1062,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
struct r3964_client_info *pClient;
struct r3964_message *pMsg;
struct r3964_client_message theMsg;
- int count;
+ int ret;
TRACE_L("read()");
@@ -1074,8 +1074,8 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
if (pMsg == NULL) {
/* no messages available. */
if (file->f_flags & O_NONBLOCK) {
- unlock_kernel();
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto unlock;
}
/* block until there is a message: */
wait_event_interruptible(pInfo->read_wait,
@@ -1085,29 +1085,31 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
/* If we still haven't got a message, we must have been signalled */
if (!pMsg) {
- unlock_kernel();
- return -EINTR;
+ ret = -EINTR;
+ goto unlock;
}
/* deliver msg to client process: */
theMsg.msg_id = pMsg->msg_id;
theMsg.arg = pMsg->arg;
theMsg.error_code = pMsg->error_code;
- count = sizeof(struct r3964_client_message);
+ ret = sizeof(struct r3964_client_message);
kfree(pMsg);
TRACE_M("r3964_read - msg kfree %p", pMsg);
- if (copy_to_user(buf, &theMsg, count)) {
- unlock_kernel();
- return -EFAULT;
+ if (copy_to_user(buf, &theMsg, ret)) {
+ ret = -EFAULT;
+ goto unlock;
}
- TRACE_PS("read - return %d", count);
- return count;
+ TRACE_PS("read - return %d", ret);
+ goto unlock;
}
+ ret = -EPERM;
+unlock:
unlock_kernel();
- return -EPERM;
+ return ret;
}
static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index d6102b644b55..574f1c79b6e6 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1591,8 +1591,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file)
/* Enable interrupt downlink for channel */
if (port->port.count == 1) {
- /* FIXME: is this needed now ? */
- tty->low_latency = 1;
tty->driver_data = port;
tty_port_tty_set(&port->port, tty);
DBG1("open: %d", port->token_dl);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index dbb912574569..881934c068c8 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1575,7 +1575,8 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
- return 0;
+ rc = 0;
+ break;
case CM_IOCSPTS:
{
struct ptsreq krnptsreq;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index daebe1ba43d4..9d1b4f548f67 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -75,114 +75,88 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
*/
static void pty_unthrottle(struct tty_struct *tty)
{
- struct tty_struct *o_tty = tty->link;
-
- if (!o_tty)
- return;
-
- tty_wakeup(o_tty);
+ tty_wakeup(tty->link);
set_bit(TTY_THROTTLED, &tty->flags);
}
-/*
- * WSH 05/24/97: modified to
- * (1) use space in tty->flip instead of a shared temp buffer
- * The flip buffers aren't being used for a pty, so there's lots
- * of space available. The buffer is protected by a per-pty
- * semaphore that should almost never come under contention.
- * (2) avoid redundant copying for cases where count >> receive_room
- * N.B. Calls from user space may now return an error code instead of
- * a count.
+/**
+ * pty_space - report space left for writing
+ * @to: tty we are writing into
*
- * FIXME: Our pty_write method is called with our ldisc lock held but
- * not our partners. We can't just wait on the other one blindly without
- * risking deadlocks. At some point when everything has settled down we need
- * to look into making pty_write at least able to sleep over an ldisc change.
+ * The tty buffers allow 64K but we sneak a peek and clip at 8K; this
+ * allows a lot of overspill room for echo and other fun messes to
+ * be handled properly.
+ */
+
+static int pty_space(struct tty_struct *to)
+{
+ int n = 8192 - to->buf.memory_used;
+ if (n < 0)
+ return 0;
+ return n;
+}
+
+/**
+ * pty_write - write to a pty
+ * @tty: the tty we write from
+ * @buf: kernel buffer of data
+ * @count: bytes to write
*
- * The return on no ldisc is a bit counter intuitive but the logic works
- * like this. During an ldisc change the other end will flush its buffers. We
- * thus return the full length which is identical to the case where we had
- * proper locking and happened to queue the bytes just before the flush during
- * the ldisc change.
+ * Our "hardware" write method. Data is coming from the ldisc which
+ * may be in a non-sleeping state. We simply throw this at the other
+ * end of the link as if we were an IRQ handler receiving stuff for
+ * the other side of the pty/tty pair.
*/
+
static int pty_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct tty_struct *to = tty->link;
- struct tty_ldisc *ld;
- int c = count;
+ int c;
- if (!to || tty->stopped)
+ if (tty->stopped)
return 0;
- ld = tty_ldisc_ref(to);
-
- if (ld) {
- c = to->receive_room;
- if (c > count)
- c = count;
- ld->ops->receive_buf(to, buf, NULL, c);
- tty_ldisc_deref(ld);
+
+ /* This isn't locked but our 8K is quite sloppy so no
+ big deal */
+
+ c = pty_space(to);
+ if (c > count)
+ c = count;
+ if (c > 0) {
+ /* Stuff the data into the input queue of the other end */
+ c = tty_insert_flip_string(to, buf, c);
+ /* And shovel */
+ tty_flip_buffer_push(to);
+ tty_wakeup(tty);
}
return c;
}
+/**
+ * pty_write_room - write space
+ * @tty: tty we are writing from
+ *
+ * Report how many bytes the ldisc can send into the queue for
+ * the other device.
+ */
+
static int pty_write_room(struct tty_struct *tty)
{
- struct tty_struct *to = tty->link;
-
- if (!to || tty->stopped)
- return 0;
-
- return to->receive_room;
+ return pty_space(tty->link);
}
-/*
- * WSH 05/24/97: Modified for asymmetric MASTER/SLAVE behavior
- * The chars_in_buffer() value is used by the ldisc select() function
- * to hold off writing when chars_in_buffer > WAKEUP_CHARS (== 256).
- * The pty driver chars_in_buffer() Master/Slave must behave differently:
- *
- * The Master side needs to allow typed-ahead commands to accumulate
- * while being canonicalized, so we report "our buffer" as empty until
- * some threshold is reached, and then report the count. (Any count >
- * WAKEUP_CHARS is regarded by select() as "full".) To avoid deadlock
- * the count returned must be 0 if no canonical data is available to be
- * read. (The N_TTY ldisc.chars_in_buffer now knows this.)
+/**
+ * pty_chars_in_buffer - characters currently in our tx queue
+ * @tty: our tty
*
- * The Slave side passes all characters in raw mode to the Master side's
- * buffer where they can be read immediately, so in this case we can
- * return the true count in the buffer.
+ * Report how much we have in the transmit queue. As everything is
+ * instantly at the other end this is easy to implement.
*/
+
static int pty_chars_in_buffer(struct tty_struct *tty)
{
- struct tty_struct *to = tty->link;
- struct tty_ldisc *ld;
- int count = 0;
-
- /* We should get the line discipline lock for "tty->link" */
- if (!to)
- return 0;
- /* We cannot take a sleeping reference here without deadlocking with
- an ldisc change - but it doesn't really matter */
- ld = tty_ldisc_ref(to);
- if (ld == NULL)
- return 0;
-
- /* The ldisc must report 0 if no characters available to be read */
- if (ld->ops->chars_in_buffer)
- count = ld->ops->chars_in_buffer(to);
-
- tty_ldisc_deref(ld);
-
- if (tty->driver->subtype == PTY_TYPE_SLAVE)
- return count;
-
- /* Master side driver ... if the other side's read buffer is less than
- * half full, return 0 to allow writers to proceed; otherwise return
- * the count. This leaves a comfortable margin to avoid overflow,
- * and still allows half a buffer's worth of typed-ahead commands.
- */
- return (count < N_TTY_BUF_SIZE/2) ? 0 : count;
+ return 0;
}
/* Set the lock flag on a pty */
@@ -202,20 +176,10 @@ static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
unsigned long flags;
- struct tty_ldisc *ld;
if (!to)
return;
- ld = tty_ldisc_ref(to);
-
- /* The other end is changing discipline */
- if (!ld)
- return;
-
- if (ld->ops->flush_buffer)
- to->ldisc->ops->flush_buffer(to);
- tty_ldisc_deref(ld);
-
+ /* tty_buffer_flush(to); FIXME */
if (to->packet) {
spin_lock_irqsave(&tty->ctrl_lock, flags);
tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 1386625fc4ca..a2e67e6df3a1 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -467,7 +467,6 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
-static void tdma_start(struct slgt_info *info);
static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
static void get_signals(struct slgt_info *info);
@@ -795,6 +794,18 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
}
}
+static void update_tx_timer(struct slgt_info *info)
+{
+ /*
+ * use worst case speed of 1200bps to calculate transmit timeout
+ * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
+ */
+ if (info->params.mode == MGSL_MODE_HDLC) {
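+ /* ~7 ms per byte at 1200 bps; the extra 1000 ms covers the 128 byte FIFO */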
+ int timeout = (tbuf_bytes(info) * 7) + 1000;
+ mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
static int write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
@@ -838,8 +849,18 @@ start:
spin_lock_irqsave(&info->lock,flags);
if (!info->tx_active)
tx_start(info);
- else
- tdma_start(info);
+ else if (!(rd_reg32(info, TDCSR) & BIT0)) {
+ /* transmit still active but transmit DMA stopped */
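+ /* step back to the most recently loaded descriptor (circular buffer) */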
+ unsigned int i = info->tbuf_current;
+ if (!i)
+ i = info->tbuf_count;
+ i--;
+ /* if DMA buf unsent must try later after tx idle */
+ if (desc_count(info->tbufs[i]))
+ ret = 0;
+ }
+ if (ret > 0)
+ update_tx_timer(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -1502,10 +1523,9 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
/* save start time for transmit timeout detection */
dev->trans_start = jiffies;
- /* start hardware transmitter if necessary */
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- tx_start(info);
+ tx_start(info);
+ update_tx_timer(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -3946,50 +3966,19 @@ static void tx_start(struct slgt_info *info)
slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
/* clear tx idle and underrun status bits */
wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
- if (info->params.mode == MGSL_MODE_HDLC)
- mod_timer(&info->tx_timer, jiffies +
- msecs_to_jiffies(5000));
} else {
slgt_irq_off(info, IRQ_TXDATA);
slgt_irq_on(info, IRQ_TXIDLE);
/* clear tx idle status bit */
wr_reg16(info, SSR, IRQ_TXIDLE);
}
- tdma_start(info);
+ /* set 1st descriptor address and start DMA */
+ wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
+ wr_reg32(info, TDCSR, BIT2 + BIT0);
info->tx_active = true;
}
}
-/*
- * start transmit DMA if inactive and there are unsent buffers
- */
-static void tdma_start(struct slgt_info *info)
-{
- unsigned int i;
-
- if (rd_reg32(info, TDCSR) & BIT0)
- return;
-
- /* transmit DMA inactive, check for unsent buffers */
- i = info->tbuf_start;
- while (!desc_count(info->tbufs[i])) {
- if (++i == info->tbuf_count)
- i = 0;
- if (i == info->tbuf_current)
- return;
- }
- info->tbuf_start = i;
-
- /* there are unsent buffers, start transmit DMA */
-
- /* reset needed if previous error condition */
- tdma_reset(info);
-
- /* set 1st descriptor address */
- wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
- wr_reg32(info, TDCSR, BIT2 + BIT0); /* IRQ + DMA enable */
-}
-
static void tx_stop(struct slgt_info *info)
{
unsigned short val;
@@ -5004,8 +4993,7 @@ static void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = false;
- info->tx_count = 0;
+ tx_stop(info);
spin_unlock_irqrestore(&info->lock,flags);
#if SYNCLINK_GENERIC_HDLC
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 6062b62800fd..b3ec9b10e292 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -1,7 +1,7 @@
/*
* Driver for TANBAC TB0219 base board.
*
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/tb0219.h>
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index a19e935847b0..913aa8d3f1c5 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -867,15 +867,22 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_wait_idle(tty);
/*
- * Shutdown the current line discipline, and reset it to N_TTY.
- *
- * FIXME: this MUST get fixed for the new reflocking
+ * Now kill off the ldisc
*/
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
- tty_ldisc_reinit(tty);
/* This will need doing differently if we need to lock */
if (o_tty)
tty_ldisc_release(o_tty, NULL);
+
+ /* And the memory resources remaining (buffers, termios) will be
+ disposed of when the kref hits zero */
}
/**
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 62dadfc95e34..4e862a75f7ff 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -193,7 +193,7 @@ int tty_port_block_til_ready(struct tty_port *port,
{
int do_clocal = 0, retval;
unsigned long flags;
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
int cd;
/* block if port is in the process of being closed */
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index 54c837288d19..e69de29bb2d1 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -1,680 +0,0 @@
-/*
- * Driver for NEC VR4100 series General-purpose I/O Unit.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2003-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/smp_lock.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/irq.h>
-#include <asm/vr41xx/vr41xx.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
-MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
-MODULE_LICENSE("GPL");
-
-static int major; /* default is dynamic major device number */
-module_param(major, int, 0);
-MODULE_PARM_DESC(major, "Major device number");
-
-#define GIUIOSELL 0x00
-#define GIUIOSELH 0x02
-#define GIUPIODL 0x04
-#define GIUPIODH 0x06
-#define GIUINTSTATL 0x08
-#define GIUINTSTATH 0x0a
-#define GIUINTENL 0x0c
-#define GIUINTENH 0x0e
-#define GIUINTTYPL 0x10
-#define GIUINTTYPH 0x12
-#define GIUINTALSELL 0x14
-#define GIUINTALSELH 0x16
-#define GIUINTHTSELL 0x18
-#define GIUINTHTSELH 0x1a
-#define GIUPODATL 0x1c
-#define GIUPODATEN 0x1c
-#define GIUPODATH 0x1e
- #define PIOEN0 0x0100
- #define PIOEN1 0x0200
-#define GIUPODAT 0x1e
-#define GIUFEDGEINHL 0x20
-#define GIUFEDGEINHH 0x22
-#define GIUREDGEINHL 0x24
-#define GIUREDGEINHH 0x26
-
-#define GIUUSEUPDN 0x1e0
-#define GIUTERMUPDN 0x1e2
-
-#define GPIO_HAS_PULLUPDOWN_IO 0x0001
-#define GPIO_HAS_OUTPUT_ENABLE 0x0002
-#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
-
-static spinlock_t giu_lock;
-static unsigned long giu_flags;
-static unsigned int giu_nr_pins;
-
-static void __iomem *giu_base;
-
-#define giu_read(offset) readw(giu_base + (offset))
-#define giu_write(offset, value) writew((value), giu_base + (offset))
-
-#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
-#define GIUINT_HIGH_OFFSET 16
-#define GIUINT_HIGH_MAX 32
-
-static inline uint16_t giu_set(uint16_t offset, uint16_t set)
-{
- uint16_t data;
-
- data = giu_read(offset);
- data |= set;
- giu_write(offset, data);
-
- return data;
-}
-
-static inline uint16_t giu_clear(uint16_t offset, uint16_t clear)
-{
- uint16_t data;
-
- data = giu_read(offset);
- data &= ~clear;
- giu_write(offset, data);
-
- return data;
-}
-
-static void ack_giuint_low(unsigned int irq)
-{
- giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
-}
-
-static void mask_giuint_low(unsigned int irq)
-{
- giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
-}
-
-static void mask_ack_giuint_low(unsigned int irq)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(irq);
- giu_clear(GIUINTENL, 1 << pin);
- giu_write(GIUINTSTATL, 1 << pin);
-}
-
-static void unmask_giuint_low(unsigned int irq)
-{
- giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
-}
-
-static struct irq_chip giuint_low_irq_chip = {
- .name = "GIUINTL",
- .ack = ack_giuint_low,
- .mask = mask_giuint_low,
- .mask_ack = mask_ack_giuint_low,
- .unmask = unmask_giuint_low,
-};
-
-static void ack_giuint_high(unsigned int irq)
-{
- giu_write(GIUINTSTATH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_giuint_high(unsigned int irq)
-{
- giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_ack_giuint_high(unsigned int irq)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
- giu_clear(GIUINTENH, 1 << pin);
- giu_write(GIUINTSTATH, 1 << pin);
-}
-
-static void unmask_giuint_high(unsigned int irq)
-{
- giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
-}
-
-static struct irq_chip giuint_high_irq_chip = {
- .name = "GIUINTH",
- .ack = ack_giuint_high,
- .mask = mask_giuint_high,
- .mask_ack = mask_ack_giuint_high,
- .unmask = unmask_giuint_high,
-};
-
-static int giu_get_irq(unsigned int irq)
-{
- uint16_t pendl, pendh, maskl, maskh;
- int i;
-
- pendl = giu_read(GIUINTSTATL);
- pendh = giu_read(GIUINTSTATH);
- maskl = giu_read(GIUINTENL);
- maskh = giu_read(GIUINTENH);
-
- maskl &= pendl;
- maskh &= pendh;
-
- if (maskl) {
- for (i = 0; i < 16; i++) {
- if (maskl & (1 << i))
- return GIU_IRQ(i);
- }
- } else if (maskh) {
- for (i = 0; i < 16; i++) {
- if (maskh & (1 << i))
- return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
- }
- }
-
- printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
- maskl, pendl, maskh, pendh);
-
- atomic_inc(&irq_err_count);
-
- return -EINVAL;
-}
-
-void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_t signal)
-{
- uint16_t mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPL, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELL, mask);
- else
- giu_clear(GIUINTHTSELL, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHL, mask);
- giu_clear(GIUREDGEINHL, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- default:
- giu_set(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- }
- }
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPL, mask);
- giu_clear(GIUINTHTSELL, mask);
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPH, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELH, mask);
- else
- giu_clear(GIUINTHTSELH, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHH, mask);
- giu_clear(GIUREDGEINHH, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- default:
- giu_set(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- }
- }
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPH, mask);
- giu_clear(GIUINTHTSELH, mask);
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
-
-void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
-{
- uint16_t mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELL, mask);
- else
- giu_clear(GIUINTALSELL, mask);
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELH, mask);
- else
- giu_clear(GIUINTALSELH, mask);
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
-
-gpio_data_t vr41xx_gpio_get_pin(unsigned int pin)
-{
- uint16_t reg, mask;
-
- if (pin >= giu_nr_pins)
- return GPIO_DATA_INVAL;
-
- if (pin < 16) {
- reg = giu_read(GIUPIODL);
- mask = (uint16_t)1 << pin;
- } else if (pin < 32) {
- reg = giu_read(GIUPIODH);
- mask = (uint16_t)1 << (pin - 16);
- } else if (pin < 48) {
- reg = giu_read(GIUPODATL);
- mask = (uint16_t)1 << (pin - 32);
- } else {
- reg = giu_read(GIUPODATH);
- mask = (uint16_t)1 << (pin - 48);
- }
-
- if (reg & mask)
- return GPIO_DATA_HIGH;
-
- return GPIO_DATA_LOW;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_get_pin);
-
-int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data)
-{
- uint16_t offset, mask, reg;
- unsigned long flags;
-
- if (pin >= giu_nr_pins)
- return -EINVAL;
-
- if (pin < 16) {
- offset = GIUPIODL;
- mask = (uint16_t)1 << pin;
- } else if (pin < 32) {
- offset = GIUPIODH;
- mask = (uint16_t)1 << (pin - 16);
- } else if (pin < 48) {
- offset = GIUPODATL;
- mask = (uint16_t)1 << (pin - 32);
- } else {
- offset = GIUPODATH;
- mask = (uint16_t)1 << (pin - 48);
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (data == GPIO_DATA_HIGH)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_set_pin);
-
-int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir)
-{
- uint16_t offset, mask, reg;
- unsigned long flags;
-
- if (pin >= giu_nr_pins)
- return -EINVAL;
-
- if (pin < 16) {
- offset = GIUIOSELL;
- mask = (uint16_t)1 << pin;
- } else if (pin < 32) {
- offset = GIUIOSELH;
- mask = (uint16_t)1 << (pin - 16);
- } else {
- if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
- offset = GIUPODATEN;
- mask = (uint16_t)1 << (pin - 32);
- } else {
- switch (pin) {
- case 48:
- offset = GIUPODATH;
- mask = PIOEN0;
- break;
- case 49:
- offset = GIUPODATH;
- mask = PIOEN1;
- break;
- default:
- return -EINVAL;
- }
- }
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (dir == GPIO_OUTPUT)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_set_direction);
-
-int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
-{
- uint16_t reg, mask;
- unsigned long flags;
-
- if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
- return -EPERM;
-
- if (pin >= 15)
- return -EINVAL;
-
- mask = (uint16_t)1 << pin;
-
- spin_lock_irqsave(&giu_lock, flags);
-
- if (pull == GPIO_PULL_UP || pull == GPIO_PULL_DOWN) {
- reg = giu_read(GIUTERMUPDN);
- if (pull == GPIO_PULL_UP)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(GIUTERMUPDN, reg);
-
- reg = giu_read(GIUUSEUPDN);
- reg |= mask;
- giu_write(GIUUSEUPDN, reg);
- } else {
- reg = giu_read(GIUUSEUPDN);
- reg &= ~mask;
- giu_write(GIUUSEUPDN, reg);
- }
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
-
-static ssize_t gpio_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
-{
- unsigned int pin;
- char value = '0';
-
- pin = iminor(file->f_path.dentry->d_inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- if (vr41xx_gpio_get_pin(pin) == GPIO_DATA_HIGH)
- value = '1';
-
- if (len <= 0)
- return -EFAULT;
-
- if (put_user(value, buf))
- return -EFAULT;
-
- return 1;
-}
-
-static ssize_t gpio_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- unsigned int pin;
- size_t i;
- char c;
- int retval = 0;
-
- pin = iminor(file->f_path.dentry->d_inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- for (i = 0; i < len; i++) {
- if (get_user(c, data + i))
- return -EFAULT;
-
- switch (c) {
- case '0':
- retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_LOW);
- break;
- case '1':
- retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_HIGH);
- break;
- case 'D':
- printk(KERN_INFO "GPIO%d: pull down\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DOWN);
- break;
- case 'd':
- printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE);
- break;
- case 'I':
- printk(KERN_INFO "GPIO%d: input\n", pin);
- retval = vr41xx_gpio_set_direction(pin, GPIO_INPUT);
- break;
- case 'O':
- printk(KERN_INFO "GPIO%d: output\n", pin);
- retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT);
- break;
- case 'o':
- printk(KERN_INFO "GPIO%d: output disable\n", pin);
- retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT_DISABLE);
- break;
- case 'P':
- printk(KERN_INFO "GPIO%d: pull up\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_UP);
- break;
- case 'p':
- printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE);
- break;
- default:
- break;
- }
-
- if (retval < 0)
- break;
- }
-
- return i;
-}
-
-static int gpio_open(struct inode *inode, struct file *file)
-{
- unsigned int pin;
-
- cycle_kernel_lock();
- pin = iminor(inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- return nonseekable_open(inode, file);
-}
-
-static int gpio_release(struct inode *inode, struct file *file)
-{
- unsigned int pin;
-
- pin = iminor(inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- return 0;
-}
-
-static const struct file_operations gpio_fops = {
- .owner = THIS_MODULE,
- .read = gpio_read,
- .write = gpio_write,
- .open = gpio_open,
- .release = gpio_release,
-};
-
-static int __devinit giu_probe(struct platform_device *dev)
-{
- struct resource *res;
- unsigned int trigger, i, pin;
- struct irq_chip *chip;
- int irq, retval;
-
- switch (dev->id) {
- case GPIO_50PINS_PULLUPDOWN:
- giu_flags = GPIO_HAS_PULLUPDOWN_IO;
- giu_nr_pins = 50;
- break;
- case GPIO_36PINS:
- giu_nr_pins = 36;
- break;
- case GPIO_48PINS_EDGE_SELECT:
- giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
- giu_nr_pins = 48;
- break;
- default:
- printk(KERN_ERR "GIU: unknown ID %d\n", dev->id);
- return -ENODEV;
- }
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
-
- giu_base = ioremap(res->start, res->end - res->start + 1);
- if (!giu_base)
- return -ENOMEM;
-
- retval = register_chrdev(major, "GIU", &gpio_fops);
- if (retval < 0) {
- iounmap(giu_base);
- giu_base = NULL;
- return retval;
- }
-
- if (major == 0) {
- major = retval;
- printk(KERN_INFO "GIU: major number %d\n", major);
- }
-
- spin_lock_init(&giu_lock);
-
- giu_write(GIUINTENL, 0);
- giu_write(GIUINTENH, 0);
-
- trigger = giu_read(GIUINTTYPH) << 16;
- trigger |= giu_read(GIUINTTYPL);
- for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
- pin = GPIO_PIN_OF_IRQ(i);
- if (pin < GIUINT_HIGH_OFFSET)
- chip = &giuint_low_irq_chip;
- else
- chip = &giuint_high_irq_chip;
-
- if (trigger & (1 << pin))
- set_irq_chip_and_handler(i, chip, handle_edge_irq);
- else
- set_irq_chip_and_handler(i, chip, handle_level_irq);
-
- }
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0 || irq >= nr_irqs)
- return -EBUSY;
-
- return cascade_irq(irq, giu_get_irq);
-}
-
-static int __devexit giu_remove(struct platform_device *dev)
-{
- if (giu_base) {
- iounmap(giu_base);
- giu_base = NULL;
- }
-
- return 0;
-}
-
-static struct platform_driver giu_device_driver = {
- .probe = giu_probe,
- .remove = __devexit_p(giu_remove),
- .driver = {
- .name = "GIU",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init vr41xx_giu_init(void)
-{
- return platform_driver_register(&giu_device_driver);
-}
-
-static void __exit vr41xx_giu_exit(void)
-{
- platform_driver_unregister(&giu_device_driver);
-}
-
-module_init(vr41xx_giu_init);
-module_exit(vr41xx_giu_exit);
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index e6ce632a393e..7539bed0f7e0 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -396,7 +396,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
kbd = kbd_table + console;
switch (cmd) {
case TIOCLINUX:
- return tioclinux(tty, arg);
+ ret = tioclinux(tty, arg);
+ break;
case KIOCSOUND:
if (!perm)
goto eperm;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 9ffb05f4095d..93c2322feab7 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -161,7 +161,7 @@ static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
if (periodic)
sh_tmu_write(p, TCOR, delta);
else
- sh_tmu_write(p, TCOR, 0);
+ sh_tmu_write(p, TCOR, 0xffffffff);
sh_tmu_write(p, TCNT, delta);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e2ec0b18948..b90eda8b3440 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -761,6 +761,10 @@ static struct kobj_type ktype_cpufreq = {
* cpufreq_add_dev - add a CPU device
*
* Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
*/
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
@@ -772,9 +776,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
struct sys_device *cpu_sys_dev;
unsigned long flags;
unsigned int j;
-#ifdef CONFIG_SMP
- struct cpufreq_policy *managed_policy;
-#endif
if (cpu_is_offline(cpu))
return 0;
@@ -804,15 +805,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
goto nomem_out;
}
if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
- kfree(policy);
ret = -ENOMEM;
- goto nomem_out;
+ goto err_free_policy;
}
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
- free_cpumask_var(policy->cpus);
- kfree(policy);
ret = -ENOMEM;
- goto nomem_out;
+ goto err_free_cpumask;
}
policy->cpu = cpu;
@@ -820,7 +818,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
/* Initially set CPU itself as the policy_cpu */
per_cpu(policy_cpu, cpu) = cpu;
- lock_policy_rwsem_write(cpu);
+ ret = (lock_policy_rwsem_write(cpu) < 0);
+ WARN_ON(ret);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
@@ -833,7 +832,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
ret = cpufreq_driver->init(policy);
if (ret) {
dprintk("initialization failed\n");
- goto err_out;
+ goto err_unlock_policy;
}
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
@@ -852,21 +851,29 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
#endif
for_each_cpu(j, policy->cpus) {
+ struct cpufreq_policy *managed_policy;
+
if (cpu == j)
continue;
/* Check for existing affected CPUs.
* They may not be aware of it due to CPU Hotplug.
*/
- managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */
+ managed_policy = cpufreq_cpu_get(j);
if (unlikely(managed_policy)) {
/* Set proper policy_cpu */
unlock_policy_rwsem_write(cpu);
per_cpu(policy_cpu, cpu) = managed_policy->cpu;
- if (lock_policy_rwsem_write(cpu) < 0)
- goto err_out_driver_exit;
+ if (lock_policy_rwsem_write(cpu) < 0) {
+ /* Should not go through policy unlock path */
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+ ret = -EBUSY;
+ cpufreq_cpu_put(managed_policy);
+ goto err_free_cpumask;
+ }
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpumask_copy(managed_policy->cpus, policy->cpus);
@@ -877,12 +884,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
ret = sysfs_create_link(&sys_dev->kobj,
&managed_policy->kobj,
"cpufreq");
- if (ret)
- goto err_out_driver_exit;
-
- cpufreq_debug_enable_ratelimit();
- ret = 0;
- goto err_out_driver_exit; /* call driver->exit() */
+ if (!ret)
+ cpufreq_cpu_put(managed_policy);
+ /*
+ * Success. We only needed to be added to the mask.
+ * Call driver->exit() because only the cpu parent of
+ * the kobj needed to call init().
+ */
+ goto out_driver_exit; /* call driver->exit() */
}
}
#endif
@@ -892,25 +901,25 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
"cpufreq");
if (ret)
- goto err_out_driver_exit;
+ goto out_driver_exit;
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while ((drv_attr) && (*drv_attr)) {
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
if (ret)
- goto err_out_driver_exit;
+ goto err_out_kobj_put;
drv_attr++;
}
if (cpufreq_driver->get) {
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
if (ret)
- goto err_out_driver_exit;
+ goto err_out_kobj_put;
}
if (cpufreq_driver->target) {
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
- goto err_out_driver_exit;
+ goto err_out_kobj_put;
}
spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -922,18 +931,22 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
/* symlink affected CPUs */
for_each_cpu(j, policy->cpus) {
+ struct cpufreq_policy *managed_policy;
+
if (j == cpu)
continue;
if (!cpu_online(j))
continue;
dprintk("CPU %u already managed, adding link\n", j);
- cpufreq_cpu_get(cpu);
+ managed_policy = cpufreq_cpu_get(cpu);
cpu_sys_dev = get_cpu_sysdev(j);
ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
"cpufreq");
- if (ret)
+ if (ret) {
+ cpufreq_cpu_put(managed_policy);
goto err_out_unregister;
+ }
}
policy->governor = NULL; /* to assure that the starting sequence is
@@ -965,17 +978,20 @@ err_out_unregister:
per_cpu(cpufreq_cpu_data, j) = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+err_out_kobj_put:
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
-err_out_driver_exit:
+out_driver_exit:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
-err_out:
+err_unlock_policy:
unlock_policy_rwsem_write(cpu);
+err_free_cpumask:
+ free_cpumask_var(policy->cpus);
+err_free_policy:
kfree(policy);
-
nomem_out:
module_put(cpufreq_driver->owner);
module_out:
@@ -1070,8 +1086,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
- unlock_policy_rwsem_write(cpu);
-
if (cpufreq_driver->target)
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
@@ -1088,6 +1102,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
if (cpufreq_driver->exit)
cpufreq_driver->exit(data);
+ unlock_policy_rwsem_write(cpu);
+
free_cpumask_var(data->related_cpus);
free_cpumask_var(data->cpus);
kfree(data);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..57490502b21c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -63,22 +63,20 @@ struct cpu_dbs_info_s {
unsigned int down_skip;
unsigned int requested_freq;
int cpu;
- unsigned int enable:1;
+ /*
+ * percpu mutex that serializes governor limit change with
+ * do_dbs_timer invocation. We do not want do_dbs_timer to run
+ * when the user is changing the governor or limits.
+ */
+ struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -143,9 +141,6 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
struct cpufreq_policy *policy;
- if (!this_dbs_info->enable)
- return 0;
-
policy = this_dbs_info->cur_policy;
/*
@@ -488,18 +483,12 @@ static void do_dbs_timer(struct work_struct *work)
delay -= jiffies % delay;
- if (lock_policy_rwsem_write(cpu) < 0)
- return;
-
- if (!dbs_info->enable) {
- unlock_policy_rwsem_write(cpu);
- return;
- }
+ mutex_lock(&dbs_info->timer_mutex);
dbs_check_cpu(dbs_info);
queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
- unlock_policy_rwsem_write(cpu);
+ mutex_unlock(&dbs_info->timer_mutex);
}
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -508,7 +497,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
delay -= jiffies % delay;
- dbs_info->enable = 1;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
delay);
@@ -516,7 +504,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
- dbs_info->enable = 0;
cancel_delayed_work_sync(&dbs_info->work);
}
@@ -535,9 +522,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if ((!cpu_online(cpu)) || (!policy->cur))
return -EINVAL;
- if (this_dbs_info->enable) /* Already enabled */
- break;
-
mutex_lock(&dbs_mutex);
rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -561,6 +545,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
this_dbs_info->down_skip = 0;
this_dbs_info->requested_freq = policy->cur;
+ mutex_init(&this_dbs_info->timer_mutex);
dbs_enable++;
/*
* Start the timerschedule work, when this governor
@@ -590,17 +575,19 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
&dbs_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
- dbs_timer_init(this_dbs_info);
-
mutex_unlock(&dbs_mutex);
+ dbs_timer_init(this_dbs_info);
+
break;
case CPUFREQ_GOV_STOP:
- mutex_lock(&dbs_mutex);
dbs_timer_exit(this_dbs_info);
+
+ mutex_lock(&dbs_mutex);
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
dbs_enable--;
+ mutex_destroy(&this_dbs_info->timer_mutex);
/*
* Stop the timerschedule work, when this governor
@@ -616,7 +603,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
case CPUFREQ_GOV_LIMITS:
- mutex_lock(&dbs_mutex);
+ mutex_lock(&this_dbs_info->timer_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
@@ -625,7 +612,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
- mutex_unlock(&dbs_mutex);
+ mutex_unlock(&this_dbs_info->timer_mutex);
break;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..d6ba14276bb1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -70,23 +70,21 @@ struct cpu_dbs_info_s {
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
int cpu;
- unsigned int enable:1,
- sample_type:1;
+ unsigned int sample_type:1;
+ /*
+ * percpu mutex that serializes governor limit change with
+ * do_dbs_timer invocation. We do not want do_dbs_timer to run
+ * when the user is changing the governor or limits.
+ */
+ struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -192,13 +190,18 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
return freq_hi;
}
+static void ondemand_powersave_bias_init_cpu(int cpu)
+{
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+ dbs_info->freq_lo = 0;
+}
+
static void ondemand_powersave_bias_init(void)
{
int i;
for_each_online_cpu(i) {
- struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
- dbs_info->freq_table = cpufreq_frequency_get_table(i);
- dbs_info->freq_lo = 0;
+ ondemand_powersave_bias_init_cpu(i);
}
}
@@ -240,12 +243,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
mutex_lock(&dbs_mutex);
- if (ret != 1) {
- mutex_unlock(&dbs_mutex);
- return -EINVAL;
- }
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
mutex_unlock(&dbs_mutex);
@@ -259,13 +260,12 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD) {
- mutex_unlock(&dbs_mutex);
return -EINVAL;
}
+ mutex_lock(&dbs_mutex);
dbs_tuners_ins.up_threshold = input;
mutex_unlock(&dbs_mutex);
@@ -363,9 +363,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
struct cpufreq_policy *policy;
unsigned int j;
- if (!this_dbs_info->enable)
- return;
-
this_dbs_info->freq_lo = 0;
policy = this_dbs_info->cur_policy;
@@ -493,14 +490,7 @@ static void do_dbs_timer(struct work_struct *work)
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
delay -= jiffies % delay;
-
- if (lock_policy_rwsem_write(cpu) < 0)
- return;
-
- if (!dbs_info->enable) {
- unlock_policy_rwsem_write(cpu);
- return;
- }
+ mutex_lock(&dbs_info->timer_mutex);
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
@@ -517,7 +507,7 @@ static void do_dbs_timer(struct work_struct *work)
dbs_info->freq_lo, CPUFREQ_RELATION_H);
}
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
- unlock_policy_rwsem_write(cpu);
+ mutex_unlock(&dbs_info->timer_mutex);
}
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -526,8 +516,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
delay -= jiffies % delay;
- dbs_info->enable = 1;
- ondemand_powersave_bias_init();
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
@@ -536,7 +524,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
- dbs_info->enable = 0;
cancel_delayed_work_sync(&dbs_info->work);
}
@@ -555,19 +542,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if ((!cpu_online(cpu)) || (!policy->cur))
return -EINVAL;
- if (this_dbs_info->enable) /* Already enabled */
- break;
-
mutex_lock(&dbs_mutex);
- dbs_enable++;
rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
if (rc) {
- dbs_enable--;
mutex_unlock(&dbs_mutex);
return rc;
}
+ dbs_enable++;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -581,6 +564,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
}
}
this_dbs_info->cpu = cpu;
+ ondemand_powersave_bias_init_cpu(cpu);
+ mutex_init(&this_dbs_info->timer_mutex);
/*
* Start the timerschedule work, when this governor
* is used for first time
@@ -598,29 +583,31 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
}
- dbs_timer_init(this_dbs_info);
-
mutex_unlock(&dbs_mutex);
+
+ dbs_timer_init(this_dbs_info);
break;
case CPUFREQ_GOV_STOP:
- mutex_lock(&dbs_mutex);
dbs_timer_exit(this_dbs_info);
+
+ mutex_lock(&dbs_mutex);
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+ mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
break;
case CPUFREQ_GOV_LIMITS:
- mutex_lock(&dbs_mutex);
+ mutex_lock(&this_dbs_info->timer_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(this_dbs_info->cur_policy,
policy->max, CPUFREQ_RELATION_H);
else if (policy->min > this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
- mutex_unlock(&dbs_mutex);
+ mutex_unlock(&this_dbs_info->timer_mutex);
break;
}
return 0;
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c39b5f0..a9952b1236b0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,22 @@
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
+#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
+static unsigned int ecb_fetch_blocks = 2;
+#define MAX_ECB_FETCH_BLOCKS (8)
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
/* Control word. */
struct cword {
unsigned int __attribute__ ((__packed__))
@@ -172,73 +185,111 @@ static inline void padlock_store_cword(struct cword *cword)
* should be used only inside the irq_ts_save/restore() context
*/
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
- struct cword *control_word)
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+ struct cword *control_word, int count)
{
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(1));
+ : "d"(control_word), "b"(key), "c"(count));
+}
+
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+ u8 *iv, struct cword *control_word, int count)
+{
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
+ : "+S" (input), "+D" (output), "+a" (iv)
+ : "d" (control_word), "b" (key), "c" (count));
+ return iv;
}
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
+ struct cword *cword, int count)
{
- u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+ /*
+ * Padlock prefetches extra data so we must provide mapped input buffers.
+ * Assume there are at least 16 bytes of stack already in use.
+ */
+ u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+ u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+ memcpy(tmp, in, count * AES_BLOCK_SIZE);
+ rep_xcrypt_ecb(tmp, out, key, cword, count);
+}
+
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+ u8 *iv, struct cword *cword, int count)
+{
+ /*
+ * Padlock prefetches extra data so we must provide mapped input buffers.
+ * Assume there are at least 16 bytes of stack already in use.
+ */
+ u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- memcpy(tmp, in, AES_BLOCK_SIZE);
- padlock_xcrypt(tmp, out, key, cword);
+ memcpy(tmp, in, count * AES_BLOCK_SIZE);
+ return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
- struct cword *cword)
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
+ struct cword *cword, int count)
{
- /* padlock_xcrypt requires at least two blocks of data. */
- if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
- (PAGE_SIZE - 1)))) {
- aes_crypt_copy(in, out, key, cword);
+ /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+ * We could avoid some copying here but it's probably not worth it.
+ */
+ if (unlikely(((unsigned long)in & (PAGE_SIZE - 1)) + ecb_fetch_bytes > PAGE_SIZE)) {
+ ecb_crypt_copy(in, out, key, cword, count);
return;
}
- padlock_xcrypt(in, out, key, cword);
+ rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+ u8 *iv, struct cword *cword, int count)
+{
+ /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+ if (unlikely(((unsigned long)in & (PAGE_SIZE - 1)) + cbc_fetch_bytes > PAGE_SIZE))
+ return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+ return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
void *control_word, u32 count)
{
- if (count == 1) {
- aes_crypt(input, output, key, control_word);
+ u32 initial = count & (ecb_fetch_blocks - 1);
+
+ if (count < ecb_fetch_blocks) {
+ ecb_crypt(input, output, key, control_word, count);
return;
}
- asm volatile ("test $1, %%cl;"
- "je 1f;"
-#ifndef CONFIG_X86_64
- "lea -1(%%ecx), %%eax;"
- "mov $1, %%ecx;"
-#else
- "lea -1(%%rcx), %%rax;"
- "mov $1, %%rcx;"
-#endif
- ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
-#ifndef CONFIG_X86_64
- "mov %%eax, %%ecx;"
-#else
- "mov %%rax, %%rcx;"
-#endif
- "1:"
- ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ if (initial)
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ : "+S"(input), "+D"(output)
+ : "d"(control_word), "b"(key), "c"(initial));
+
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(count)
- : "ax");
+ : "d"(control_word), "b"(key), "c"(count - initial));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
u8 *iv, void *control_word, u32 count)
{
- /* rep xcryptcbc */
- asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+ u32 initial = count & (cbc_fetch_blocks - 1);
+
+ if (count < cbc_fetch_blocks)
+ return cbc_crypt(input, output, key, iv, control_word, count);
+
+ if (initial)
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
+ : "+S" (input), "+D" (output), "+a" (iv)
+ : "d" (control_word), "b" (key), "c" (count));
+
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count));
+ : "d" (control_word), "b" (key), "c" (count-initial));
return iv;
}
@@ -249,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
- aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+ ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -261,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
- aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+ ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -454,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = {
static int __init padlock_init(void)
{
int ret;
+ struct cpuinfo_x86 *c = &cpu_data(0);
if (!cpu_has_xcrypt) {
printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +528,12 @@ static int __init padlock_init(void)
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+ if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+ ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+ cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+ printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+ }
+
out:
return ret;
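
The rewritten padlock_xcrypt_ecb()/padlock_xcrypt_cbc() split a request into a remainder of count & (fetch_blocks - 1) blocks followed by the aligned bulk, and ecb_crypt()/cbc_crypt() copy through an aligned stack buffer whenever the hardware prefetch would run past the end of the input page. A standalone worked example of that arithmetic (not driver code; the fetch_blocks value assumes the Nano workaround is active):

	#include <stdio.h>

	#define AES_BLOCK_SIZE	16
	#define PAGE_SIZE	4096UL

	int main(void)
	{
		unsigned int fetch_blocks = 8;		/* MAX_ECB_FETCH_BLOCKS with the workaround */
		unsigned int count = 13;		/* blocks in this request */
		unsigned int initial = count & (fetch_blocks - 1);	/* 13 & 7 = 5 */

		/* First pass handles the remainder, second pass the aligned bulk: 5 + 8 = 13. */
		printf("initial = %u, bulk = %u\n", initial, count - initial);

		unsigned long in_offset = 4000;		/* input starts 96 bytes before the page end */
		unsigned int fetch_bytes = fetch_blocks * AES_BLOCK_SIZE;	/* 128 */
		if (in_offset + fetch_bytes > PAGE_SIZE)	/* 4000 + 128 > 4096 */
			printf("prefetch would cross the page; copy through the bounce buffer\n");
		return 0;
	}
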
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 9aa9ea9822c8..88dab52926f4 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -432,23 +432,27 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
list_splice_init(&txd->tx_list, &dc->free_list);
list_move(&desc->desc_node, &dc->free_list);
- /*
- * We use dma_unmap_page() regardless of how the buffers were
- * mapped before they were submitted...
- */
if (!ds) {
dma_addr_t dmaaddr;
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.DAR : desc->hwdesc32.DAR;
- dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
- desc->len, DMA_FROM_DEVICE);
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.SAR : desc->hwdesc32.SAR;
- dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
- desc->len, DMA_TO_DEVICE);
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_TO_DEVICE);
}
}
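
The txx9dmac hunk keys the completion-time unmap on the DMA_COMPL_*_UNMAP_SINGLE flags, so buffers mapped with dma_map_single() are no longer torn down with dma_unmap_page(). A condensed sketch of the pairing rule; the helper and its arguments are hypothetical, only the flag checks mirror the patch:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper: unmap one side of a completed transfer. */
	static void complete_unmap(struct device *dev, dma_addr_t addr, size_t len,
				   enum dma_ctrl_flags flags, bool is_src)
	{
		enum dma_ctrl_flags skip = is_src ? DMA_COMPL_SKIP_SRC_UNMAP
						  : DMA_COMPL_SKIP_DEST_UNMAP;
		enum dma_ctrl_flags single = is_src ? DMA_COMPL_SRC_UNMAP_SINGLE
						    : DMA_COMPL_DEST_UNMAP_SINGLE;
		enum dma_data_direction dir = is_src ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

		if (flags & skip)
			return;
		if (flags & single)
			dma_unmap_single(dev, addr, len, dir);	/* pairs with dma_map_single() */
		else
			dma_unmap_page(dev, addr, len, dir);	/* pairs with dma_map_page() */
	}
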
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c36bf40568cf..858fe6037223 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -754,13 +754,13 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
int bit;
- enum dev_type edac_cap = EDAC_NONE;
+ enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
? 19
: 17;
- if (pvt->dclr0 >> BIT(bit))
+ if (pvt->dclr0 & BIT(bit))
edac_cap = EDAC_FLAG_SECDED;
return edac_cap;
@@ -1269,7 +1269,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
if (channels == 0)
channels = 1;
- debugf0("DIMM count= %d\n", channels);
+ debugf0("MCT channel count: %d\n", channels);
return channels;
@@ -2966,7 +2966,12 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
" Use of the override can cause "
"unknown side effects.\n");
ret = -ENODEV;
- }
+ } else
+ /*
+ * enable further driver loading if ECC enable is
+ * overridden.
+ */
+ ret = 0;
} else {
amd64_printk(KERN_INFO,
"ECC is enabled by BIOS, Proceeding "
@@ -3006,7 +3011,6 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
if (pvt->nbcap & K8_NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
@@ -3052,7 +3056,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
if (!pvt)
goto err_exit;
- pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
+ pvt->mc_node_id = get_node_id(dram_f2_ctl);
pvt->dram_f2_ctl = dram_f2_ctl;
pvt->ext_model = boot_cpu_data.x86_model >> 4;
@@ -3179,8 +3183,7 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
{
int ret = 0;
- debugf0("(MC node=%d,mc_type='%s')\n",
- get_mc_node_id_from_pdev(pdev),
+ debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
get_amd_family_name(mc_type->driver_data));
ret = pci_enable_device(pdev);
@@ -3319,15 +3322,17 @@ static int __init amd64_edac_init(void)
err = amd64_init_2nd_stage(pvt_lookup[nb]);
if (err)
- goto err_exit;
+ goto err_2nd_stage;
}
amd64_setup_pci_device();
return 0;
+err_2nd_stage:
+ debugf0("2nd stage failed\n");
+
err_exit:
- debugf0("'finish_setup' stage failed\n");
pci_unregister_driver(&amd64_pci_driver);
return err;
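
The first amd64_edac hunk replaces pvt->dclr0 >> BIT(bit) with pvt->dclr0 & BIT(bit): BIT(17) is a mask (0x20000), so shifting by it does not test the bit, while the AND is nonzero exactly when the ECC-capability bit is set. A trivial standalone illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(n)	(1UL << (n))

	int main(void)
	{
		uint32_t dclr0 = 0x00020000;	/* only bit 17 set */
		int bit = 17;

		/* The old form shifted by BIT(bit) == 0x20000, i.e. by a mask, not a bit number. */
		printf("BIT(bit) = %#lx\n", BIT(bit));

		/* The fixed form isolates the intended bit. */
		printf("dclr0 & BIT(bit) = %#lx\n", dclr0 & BIT(bit));
		return 0;
	}
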
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index a159957e167b..ba73015af8e4 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -444,7 +444,7 @@ enum {
#define K8_MSR_MC4ADDR 0x0412
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
+static inline int get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3493c6bdb820..871c13b4c148 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -150,6 +150,8 @@ enum mem_type {
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
MEM_XDR, /* Rambus XDR */
+ MEM_DDR3, /* DDR3 RAM */
+ MEM_RDDR3, /* Registered DDR3 RAM */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -167,6 +169,8 @@ enum mem_type {
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ad218fe4942d..e1d4ce083481 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -94,7 +94,9 @@ static const char *mem_types[] = {
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
- [MEM_XDR] = "XDR"
+ [MEM_XDR] = "XDR",
+ [MEM_DDR3] = "Unbuffered-DDR3",
+ [MEM_RDDR3] = "Registered-DDR3"
};
static const char *dev_types[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7c8c2d72916f..3f2ccfc6407c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_RDDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_RDDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_DDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_DDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 135b3539a030..52432ee7c4b9 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -53,6 +53,7 @@
#define DSC_SDTYPE_DDR 0x02000000
#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_SDTYPE_DDR3 0x07000000
#define DSC_X32_EN 0x00000020
/* Err_Int_En */
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 450902438208..13efcd362072 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,28 +1,29 @@
-comment "A new alternative FireWire stack is available with EXPERIMENTAL=y"
- depends on EXPERIMENTAL=n
-
-comment "Enable only one of the two stacks, unless you know what you are doing"
- depends on EXPERIMENTAL
+comment "You can enable one or both FireWire driver stacks."
+comment "See the help texts for more information."
config FIREWIRE
- tristate "New FireWire stack, EXPERIMENTAL"
- depends on EXPERIMENTAL
+ tristate "FireWire driver stack"
select CRC_ITU_T
help
- This is the "Juju" FireWire stack, a new alternative implementation
- designed for robustness and simplicity. You can build either this
- stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both.
- Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
- before you enable the new stack.
+ This is the new-generation IEEE 1394 (FireWire) driver stack
+ a.k.a. Juju, a new implementation designed for robustness and
+ simplicity.
+ See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ for information about migration from the older Linux 1394 stack
+ to the new driver stack.
To compile this driver as a module, say M here: the module will be
called firewire-core.
This module functionally replaces ieee1394, raw1394, and video1394.
To access it from application programs, you generally need at least
- libraw1394 version 2. IIDC/DCAM applications also need libdc1394
- version 2. No libraries are required to access storage devices
- through the firewire-sbp2 driver.
+ libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
+ No libraries are required to access storage devices through the
+ firewire-sbp2 driver.
+
+ NOTE:
+ FireWire audio devices currently require the old drivers (ieee1394,
+ ohci1394, raw1394).
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
@@ -37,11 +38,9 @@ config FIREWIRE_OHCI
stack.
NOTE:
-
- You should only build either firewire-ohci or the old ohci1394 driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -50,12 +49,7 @@ config FIREWIRE_OHCI
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
config FIREWIRE_OHCI_DEBUG
bool
@@ -77,3 +71,17 @@ config FIREWIRE_SBP2
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
+
+config FIREWIRE_NET
+ tristate "IP networking over 1394 (EXPERIMENTAL)"
+ depends on FIREWIRE && INET && EXPERIMENTAL
+ help
+ This enables IPv4 over IEEE 1394, providing IP connectivity with
+ other implementations of RFC 2734 as found on several operating
+ systems. Multicast support is currently limited.
+
+ NOTE, this driver is not stable yet!
+
+ To compile this driver as a module, say M here: The module will be
+ called firewire-net. It replaces eth1394 of the classic IEEE 1394
+ stack.
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index bc3b9bf822bf..a8f9bb6d9fdf 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -6,7 +6,9 @@ firewire-core-y += core-card.o core-cdev.o core-device.o \
core-iso.o core-topology.o core-transaction.o
firewire-ohci-y += ohci.o
firewire-sbp2-y += sbp2.o
+firewire-net-y += net.o
-obj-$(CONFIG_FIREWIRE) += firewire-core.o
+obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
+obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 4c1be64fdddd..f74edae5cb4c 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -176,6 +176,7 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
return 0;
}
+EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
@@ -189,13 +190,14 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
+EXPORT_SYMBOL(fw_core_remove_descriptor);
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
- fw_iso_resource_manage(card, generation, 1ULL << 31,
- &channel, &bandwidth, true);
+ fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
+ &bandwidth, true, card->bm_transaction_data);
if (channel == 31) {
card->broadcast_channel_allocated = true;
device_for_each_child(card->device, (void *)(long)generation,
@@ -228,7 +230,6 @@ static void fw_card_bm_work(struct work_struct *work)
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
- __be32 lock_data[2];
spin_lock_irqsave(&card->lock, flags);
@@ -271,22 +272,23 @@ static void fw_card_bm_work(struct work_struct *work)
goto pick_me;
}
- lock_data[0] = cpu_to_be32(0x3f);
- lock_data[1] = cpu_to_be32(local_id);
+ card->bm_transaction_data[0] = cpu_to_be32(0x3f);
+ card->bm_transaction_data[1] = cpu_to_be32(local_id);
spin_unlock_irqrestore(&card->lock, flags);
rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- lock_data, sizeof(lock_data));
+ card->bm_transaction_data,
+ sizeof(card->bm_transaction_data));
if (rcode == RCODE_GENERATION)
/* Another bus reset, BM work has been rescheduled. */
goto out;
if (rcode == RCODE_COMPLETE &&
- lock_data[0] != cpu_to_be32(0x3f)) {
+ card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
/* Somebody else is BM. Only act as IRM. */
if (local_id == irm_id)
@@ -459,11 +461,11 @@ EXPORT_SYMBOL(fw_card_add);
/*
- * The next few functions implements a dummy driver that use once a
- * card driver shuts down an fw_card. This allows the driver to
- * cleanly unload, as all IO to the card will be handled by the dummy
- * driver instead of calling into the (possibly) unloaded module. The
- * dummy driver just fails all IO.
+ * The next few functions implement a dummy driver that is used once a card
+ * driver shuts down an fw_card. This allows the driver to cleanly unload,
+ * as all IO to the card will be handled (and failed) by the dummy driver
+ * instead of calling into the module. Only functions for iso context
+ * shutdown still need to be provided by the card driver.
*/
static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
@@ -510,7 +512,7 @@ static int dummy_enable_phys_dma(struct fw_card *card,
return -ENODEV;
}
-static struct fw_card_driver dummy_driver = {
+static const struct fw_card_driver dummy_driver_template = {
.enable = dummy_enable,
.update_phy_reg = dummy_update_phy_reg,
.set_config_rom = dummy_set_config_rom,
@@ -529,6 +531,8 @@ void fw_card_release(struct kref *kref)
void fw_core_remove_card(struct fw_card *card)
{
+ struct fw_card_driver dummy_driver = dummy_driver_template;
+
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_core_initiate_bus_reset(card, 1);
@@ -537,7 +541,9 @@ void fw_core_remove_card(struct fw_card *card)
list_del_init(&card->link);
mutex_unlock(&card_mutex);
- /* Set up the dummy driver. */
+ /* Switch off most of the card driver interface. */
+ dummy_driver.free_iso_context = card->driver->free_iso_context;
+ dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
fw_destroy_nodes(card);
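
fw_core_remove_card() now copies the const dummy_driver_template into a local ops struct and carries over only the iso-shutdown callbacks before swapping it in, so every other entry point fails cleanly while teardown completes. A generic sketch of that copy-then-override pattern; all names below are hypothetical:

	#include <errno.h>

	struct my_ops {
		int  (*enable)(void *dev);
		void (*stop_iso)(void *ctx);
	};

	struct my_device {
		const struct my_ops *ops;
	};

	static int dummy_enable(void *dev)
	{
		return -ENODEV;			/* fail all further I/O */
	}

	static const struct my_ops dummy_template = {
		.enable = dummy_enable,
		/* .stop_iso left unset: the real callback is carried over below */
	};

	static void my_shutdown(struct my_device *dev)
	{
		struct my_ops dummy = dummy_template;	/* writable copy of the const template */

		dummy.stop_iso = dev->ops->stop_iso;	/* keep what teardown still needs */
		dev->ops = &dummy;			/* everything else now fails with -ENODEV */
		/* ... wait for all outstanding users before 'dummy' leaves scope ... */
	}
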
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index d1d30c615b0f..ced186d7e9a9 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -125,6 +125,7 @@ struct iso_resource {
int generation;
u64 channels;
s32 bandwidth;
+ __be32 transaction_data[2];
struct iso_resource_event *e_alloc, *e_dealloc;
};
@@ -1049,7 +1050,8 @@ static void iso_resource_work(struct work_struct *work)
r->channels, &channel, &bandwidth,
todo == ISO_RES_ALLOC ||
todo == ISO_RES_REALLOC ||
- todo == ISO_RES_ALLOC_ONCE);
+ todo == ISO_RES_ALLOC_ONCE,
+ r->transaction_data);
/*
* Is this generation outdated already? As long as this resource sticks
* in the idr, it will be scheduled again for a newer generation or at
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 28076c892d7e..110e731f5574 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -71,7 +71,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
for (j = 0; j < i; j++) {
address = page_private(buffer->pages[j]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, direction);
__free_page(buffer->pages[j]);
}
kfree(buffer->pages);
@@ -80,6 +80,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
return -ENOMEM;
}
+EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
@@ -107,13 +108,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
for (i = 0; i < buffer->page_count; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, buffer->direction);
__free_page(buffer->pages[i]);
}
kfree(buffer->pages);
buffer->pages = NULL;
}
+EXPORT_SYMBOL(fw_iso_buffer_destroy);
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
@@ -136,6 +138,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
return ctx;
}
+EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
@@ -143,12 +146,14 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
card->driver->free_iso_context(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
+EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
@@ -159,20 +164,21 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
+EXPORT_SYMBOL(fw_iso_context_queue);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_stop);
/*
* Isochronous bus resource management (channels, bandwidth), client side
*/
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
- int bandwidth, bool allocate)
+ int bandwidth, bool allocate, __be32 data[2])
{
- __be32 data[2];
int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
/*
@@ -208,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
}
static int manage_channel(struct fw_card *card, int irm_id, int generation,
- u32 channels_mask, u64 offset, bool allocate)
+ u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
- __be32 data[2], c, all, old;
+ __be32 c, all, old;
int i, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -253,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
}
static void deallocate_channel(struct fw_card *card, int irm_id,
- int generation, int channel)
+ int generation, int channel, __be32 buffer[2])
{
u32 mask;
u64 offset;
@@ -262,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
- manage_channel(card, irm_id, generation, mask, offset, false);
+ manage_channel(card, irm_id, generation, mask, offset, false, buffer);
}
/**
@@ -291,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
*/
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth,
- bool allocate)
+ bool allocate, __be32 buffer[2])
{
u32 channels_hi = channels_mask; /* channels 31...0 */
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
@@ -303,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
- CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
+ allocate, buffer);
if (channels_lo && c < 0) {
c = manage_channel(card, irm_id, generation, channels_lo,
- CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
+ allocate, buffer);
if (c >= 0)
c += 32;
}
@@ -318,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
if (*bandwidth == 0)
return;
- ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+ ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
+ allocate, buffer);
if (ret < 0)
*bandwidth = 0;
if (allocate && ret < 0 && c >= 0) {
- deallocate_channel(card, irm_id, generation, c);
+ deallocate_channel(card, irm_id, generation, c, buffer);
*channel = ret;
}
}
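
fw_iso_resource_manage() and its helpers now take a caller-provided __be32 buffer[2] for the lock-request payload instead of a stack-local array, which is why core-card.c and core-cdev.c grow bm_transaction_data/transaction_data fields. A sketch of a caller under the new prototype; the surrounding client struct is hypothetical, the call itself follows the signature in this patch:

	/* Hypothetical caller; assumes the firewire declarations above are visible. */
	struct my_iso_client {
		__be32 transaction_data[2];	/* must outlive the transaction, hence not on the stack */
	};

	static void my_allocate_broadcast(struct fw_card *card, int generation,
					  struct my_iso_client *client)
	{
		int channel, bandwidth = 0;	/* bandwidth 0: channel only, as in core-card.c */

		fw_iso_resource_manage(card, generation, 1ULL << 31,
				       &channel, &bandwidth, true,
				       client->transaction_data);
		if (channel != 31)
			return;		/* allocation failed, mirroring allocate_broadcast_channel() */
		/* the broadcast channel is ours for this bus generation */
	}
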
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0a25a7b38a80..6052816be353 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,7 +1,6 @@
#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H
-#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
@@ -97,17 +96,6 @@ int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(u32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-struct fw_descriptor {
- struct list_head link;
- size_t length;
- u32 immediate;
- u32 key;
- const u32 *data;
-};
-
-int fw_core_add_descriptor(struct fw_descriptor *desc);
-void fw_core_remove_descriptor(struct fw_descriptor *desc);
-
/* -cdev */
@@ -130,79 +118,10 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* -iso */
-/*
- * The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointer to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frame automatically).
- */
-struct fw_iso_packet {
- u16 payload_length; /* Length of indirect payload. */
- u32 interrupt:1; /* Generate interrupt on this packet */
- u32 skip:1; /* Set to not send packet at all. */
- u32 tag:2;
- u32 sy:4;
- u32 header_length:8; /* Length of immediate header. */
- u32 header[0];
-};
-
-#define FW_ISO_CONTEXT_TRANSMIT 0
-#define FW_ISO_CONTEXT_RECEIVE 1
-
-#define FW_ISO_CONTEXT_MATCH_TAG0 1
-#define FW_ISO_CONTEXT_MATCH_TAG1 2
-#define FW_ISO_CONTEXT_MATCH_TAG2 4
-#define FW_ISO_CONTEXT_MATCH_TAG3 8
-#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
-
-/*
- * An iso buffer is just a set of pages mapped for DMA in the
- * specified direction. Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space. We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma.
- */
-struct fw_iso_buffer {
- enum dma_data_direction direction;
- struct page **pages;
- int page_count;
-};
-
-typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
- u32 cycle, size_t header_length,
- void *header, void *data);
-
-struct fw_iso_context {
- struct fw_card *card;
- int type;
- int channel;
- int speed;
- size_t header_size;
- fw_iso_callback_t callback;
- void *callback_data;
-};
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
-int fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload);
-int fw_iso_context_start(struct fw_iso_context *ctx,
- int cycle, int sync, int tags);
-int fw_iso_context_stop(struct fw_iso_context *ctx);
-void fw_iso_context_destroy(struct fw_iso_context *ctx);
-
void fw_iso_resource_manage(struct fw_card *card, int generation,
- u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+ u64 channels_mask, int *channel, int *bandwidth,
+ bool allocate, __be32 buffer[2]);
/* -topology */
@@ -285,9 +204,4 @@ void fw_flush_transactions(struct fw_card *card);
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
-static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
-{
- return tag << 14 | channel << 8 | sy;
-}
-
#endif /* _FIREWIRE_CORE_H */
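
The iso-context and iso-buffer declarations leave core.h because they become part of the public interface (see the EXPORT_SYMBOLs added in core-iso.c), so client drivers such as firewire-net can drive iso reception directly. A condensed sketch of that receive-side call sequence, following fwnet_broadcast_start() in net.c below; constants are abbreviated and packet queueing is only indicated:

	/* Condensed illustration only; assumes the public firewire declarations are in scope. */
	static struct fw_iso_context *start_broadcast_rx(struct fw_card *card,
							 struct fw_iso_buffer *buf,
							 fw_iso_callback_t callback,
							 void *callback_data)
	{
		struct fw_iso_context *ctx;

		ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
					    31 /* broadcast channel */, card->link_speed,
					    8 /* header size */, callback, callback_data);
		if (IS_ERR(ctx))
			return ctx;

		if (fw_iso_buffer_init(buf, card, 4 /* pages */, DMA_FROM_DEVICE) < 0) {
			fw_iso_context_destroy(ctx);
			return ERR_PTR(-ENOMEM);
		}

		/* queue receive packets with fw_iso_context_queue() here, then start: */
		if (fw_iso_context_start(ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS) < 0) {
			fw_iso_buffer_destroy(buf, card);
			fw_iso_context_destroy(ctx);
			return ERR_PTR(-EIO);
		}
		return ctx;
	}
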
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
new file mode 100644
index 000000000000..a42209a73aed
--- /dev/null
+++ b/drivers/firewire/net.c
@@ -0,0 +1,1655 @@
+/*
+ * IPv4 over IEEE 1394, per RFC 2734
+ *
+ * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
+ *
+ * based on eth1394 by Ben Collins et al
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/highmem.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include <asm/unaligned.h>
+#include <net/arp.h>
+
+#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */
+#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2)
+
+#define IEEE1394_BROADCAST_CHANNEL 31
+#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
+#define IEEE1394_MAX_PAYLOAD_S100 512
+#define FWNET_NO_FIFO_ADDR (~0ULL)
+
+#define IANA_SPECIFIER_ID 0x00005eU
+#define RFC2734_SW_VERSION 0x000001U
+
+#define IEEE1394_GASP_HDR_SIZE 8
+
+#define RFC2374_UNFRAG_HDR_SIZE 4
+#define RFC2374_FRAG_HDR_SIZE 8
+#define RFC2374_FRAG_OVERHEAD 4
+
+#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
+#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
+#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
+#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
+
+#define RFC2734_HW_ADDR_LEN 16
+
+struct rfc2734_arp {
+ __be16 hw_type; /* 0x0018 */
+ __be16 proto_type; /* 0x0806 */
+ u8 hw_addr_len; /* 16 */
+ u8 ip_addr_len; /* 4 */
+ __be16 opcode; /* ARP Opcode */
+ /* Above is exactly the same format as struct arphdr */
+
+ __be64 s_uniq_id; /* Sender's 64bit EUI */
+ u8 max_rec; /* Sender's max packet size */
+ u8 sspd; /* Sender's max speed */
+ __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
+ __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
+ __be32 sip; /* Sender's IP Address */
+ __be32 tip; /* IP Address of requested hw addr */
+} __attribute__((packed));
+
+/* This header format is specific to this driver implementation. */
+#define FWNET_ALEN 8
+#define FWNET_HLEN 10
+struct fwnet_header {
+ u8 h_dest[FWNET_ALEN]; /* destination address */
+ __be16 h_proto; /* packet type ID field */
+} __attribute__((packed));
+
+/* IPv4 and IPv6 encapsulation header */
+struct rfc2734_header {
+ u32 w0;
+ u32 w1;
+};
+
+#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
+#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
+#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
+#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
+
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_ether_type(et) (et)
+#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_fg_off(fgo) (fgo)
+
+#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
+
+static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
+ | fwnet_set_hdr_ether_type(ether_type);
+}
+
+static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type, unsigned dg_size, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_ether_type(ether_type);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
+
+static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
+ unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(lf)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_fg_off(fg_off);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
+
+/* This list keeps track of what parts of the datagram have been filled in */
+struct fwnet_fragment_info {
+ struct list_head fi_link;
+ u16 offset;
+ u16 len;
+};
+
+struct fwnet_partial_datagram {
+ struct list_head pd_link;
+ struct list_head fi_list;
+ struct sk_buff *skb;
+ /* FIXME Why not use skb->data? */
+ char *pbuf;
+ u16 datagram_label;
+ u16 ether_type;
+ u16 datagram_size;
+};
+
+static DEFINE_MUTEX(fwnet_device_mutex);
+static LIST_HEAD(fwnet_device_list);
+
+struct fwnet_device {
+ struct list_head dev_link;
+ spinlock_t lock;
+ enum {
+ FWNET_BROADCAST_ERROR,
+ FWNET_BROADCAST_RUNNING,
+ FWNET_BROADCAST_STOPPED,
+ } broadcast_state;
+ struct fw_iso_context *broadcast_rcv_context;
+ struct fw_iso_buffer broadcast_rcv_buffer;
+ void **broadcast_rcv_buffer_ptrs;
+ unsigned broadcast_rcv_next_ptr;
+ unsigned num_broadcast_rcv_ptrs;
+ unsigned rcv_buffer_size;
+ /*
+ * This value is the maximum unfragmented datagram size that can be
+ * sent by the hardware. It already has the GASP overhead and the
+ * unfragmented datagram header overhead calculated into it.
+ */
+ unsigned broadcast_xmt_max_payload;
+ u16 broadcast_xmt_datagramlabel;
+
+ /*
+ * The CSR address that remote nodes must send datagrams to for us to
+ * receive them.
+ */
+ struct fw_address_handler handler;
+ u64 local_fifo;
+
+ /* List of packets to be sent */
+ struct list_head packet_list;
+ /*
+ * List of packets that were broadcasted. When we get an ISO interrupt
+ * one of them has been sent
+ */
+ struct list_head broadcasted_list;
+ /* List of packets that have been sent but not yet acked */
+ struct list_head sent_list;
+
+ struct list_head peer_list;
+ struct fw_card *card;
+ struct net_device *netdev;
+};
+
+struct fwnet_peer {
+ struct list_head peer_link;
+ struct fwnet_device *dev;
+ u64 guid;
+ u64 fifo;
+
+ /* guarded by dev->lock */
+ struct list_head pd_list; /* received partial datagrams */
+ unsigned pdg_size; /* pd_list size */
+
+ u16 datagram_label; /* outgoing datagram label */
+ unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
+ int node_id;
+ int generation;
+ unsigned speed;
+};
+
+/* This is our task struct. It's used for the packet complete callback. */
+struct fwnet_packet_task {
+ /*
+ * ptask can actually be on dev->packet_list, dev->broadcasted_list,
+ * or dev->sent_list depending on its current state.
+ */
+ struct list_head pt_link;
+ struct fw_transaction transaction;
+ struct rfc2734_header hdr;
+ struct sk_buff *skb;
+ struct fwnet_device *dev;
+
+ int outstanding_pkts;
+ unsigned max_payload;
+ u64 fifo_addr;
+ u16 dest_node;
+ u8 generation;
+ u8 speed;
+};
+
+/*
+ * saddr == NULL means use device source address.
+ * daddr == NULL means leave destination address (eg unresolved arp).
+ */
+static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned len)
+{
+ struct fwnet_header *h;
+
+ h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
+ put_unaligned_be16(type, &h->h_proto);
+
+ if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ memset(h->h_dest, 0, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ if (daddr) {
+ memcpy(h->h_dest, daddr, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ return -net->hard_header_len;
+}
+
+static int fwnet_header_rebuild(struct sk_buff *skb)
+{
+ struct fwnet_header *h = (struct fwnet_header *)skb->data;
+
+ if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
+ return arp_find((unsigned char *)&h->h_dest, skb);
+
+ fw_notify("%s: unable to resolve type %04x addresses\n",
+ skb->dev->name, be16_to_cpu(h->h_proto));
+ return 0;
+}
+
+static int fwnet_header_cache(const struct neighbour *neigh,
+ struct hh_cache *hh)
+{
+ struct net_device *net;
+ struct fwnet_header *h;
+
+ if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
+ return -1;
+ net = neigh->dev;
+ h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
+ h->h_proto = hh->hh_type;
+ memcpy(h->h_dest, neigh->ha, net->addr_len);
+ hh->hh_len = FWNET_HLEN;
+
+ return 0;
+}
+
+/* Called by Address Resolution module to notify changes in address. */
+static void fwnet_header_cache_update(struct hh_cache *hh,
+ const struct net_device *net, const unsigned char *haddr)
+{
+ memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
+}
+
+static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
+
+ return FWNET_ALEN;
+}
+
+static const struct header_ops fwnet_header_ops = {
+ .create = fwnet_header_create,
+ .rebuild = fwnet_header_rebuild,
+ .cache = fwnet_header_cache,
+ .cache_update = fwnet_header_cache_update,
+ .parse = fwnet_header_parse,
+};
+
+/* FIXME: is this correct for all cases? */
+static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
+ unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi;
+ unsigned end = offset + len;
+
+ list_for_each_entry(fi, &pd->fi_list, fi_link)
+ if (offset < fi->offset + fi->len && end > fi->offset)
+ return true;
+
+ return false;
+}
+
+/* Assumes that new fragment does not overlap any existing fragments */
+static struct fwnet_fragment_info *fwnet_frag_new(
+ struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi, *fi2, *new;
+ struct list_head *list;
+
+ list = &pd->fi_list;
+ list_for_each_entry(fi, &pd->fi_list, fi_link) {
+ if (fi->offset + fi->len == offset) {
+ /* The new fragment can be tacked on to the end */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.next,
+ struct fwnet_fragment_info, fi_link);
+ if (fi->offset + fi->len == fi2->offset) {
+ /* glue fragments together */
+ fi->len += len + fi2->len;
+ list_del(&fi2->fi_link);
+ kfree(fi2);
+ } else {
+ fi->len += len;
+ }
+
+ return fi;
+ }
+ if (offset + len == fi->offset) {
+ /* The new fragment can be tacked on to the beginning */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.prev,
+ struct fwnet_fragment_info, fi_link);
+ if (fi2->offset + fi2->len == fi->offset) {
+ /* glue fragments together */
+ fi2->len += fi->len + len;
+ list_del(&fi->fi_link);
+ kfree(fi);
+
+ return fi2;
+ }
+ fi->offset = offset;
+ fi->len += len;
+
+ return fi;
+ }
+ if (offset > fi->offset + fi->len) {
+ list = &fi->fi_link;
+ break;
+ }
+ if (offset + len < fi->offset) {
+ list = fi->fi_link.prev;
+ break;
+ }
+ }
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ fw_error("out of memory\n");
+ return NULL;
+ }
+
+ new->offset = offset;
+ new->len = len;
+ list_add(&new->fi_link, list);
+
+ return new;
+}
+
+static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
+ struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
+ void *frag_buf, unsigned frag_off, unsigned frag_len)
+{
+ struct fwnet_partial_datagram *new;
+ struct fwnet_fragment_info *fi;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto fail;
+
+ INIT_LIST_HEAD(&new->fi_list);
+ fi = fwnet_frag_new(new, frag_off, frag_len);
+ if (fi == NULL)
+ goto fail_w_new;
+
+ new->datagram_label = datagram_label;
+ new->datagram_size = dg_size;
+ new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
+ if (new->skb == NULL)
+ goto fail_w_fi;
+
+ skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
+ new->pbuf = skb_put(new->skb, dg_size);
+ memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+ list_add_tail(&new->pd_link, &peer->pd_list);
+
+ return new;
+
+fail_w_fi:
+ kfree(fi);
+fail_w_new:
+ kfree(new);
+fail:
+ fw_error("out of memory\n");
+
+ return NULL;
+}
+
+static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
+ u16 datagram_label)
+{
+ struct fwnet_partial_datagram *pd;
+
+ list_for_each_entry(pd, &peer->pd_list, pd_link)
+ if (pd->datagram_label == datagram_label)
+ return pd;
+
+ return NULL;
+}
+
+
+static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
+{
+ struct fwnet_fragment_info *fi, *n;
+
+ list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
+ kfree(fi);
+
+ list_del(&old->pd_link);
+ dev_kfree_skb_any(old->skb);
+ kfree(old);
+}
+
+static bool fwnet_pd_update(struct fwnet_peer *peer,
+ struct fwnet_partial_datagram *pd, void *frag_buf,
+ unsigned frag_off, unsigned frag_len)
+{
+ if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
+ return false;
+
+ memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+ /*
+ * Move list entry to the end of the list so that the oldest partial
+ * datagrams drift to the front of the list, where they are evicted
+ * first.
+ */
+ list_move_tail(&pd->pd_link, &peer->pd_list);
+
+ return true;
+}
+
+static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
+{
+ struct fwnet_fragment_info *fi;
+
+ fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
+
+ return fi->len == pd->datagram_size;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
+ u64 guid)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->guid == guid)
+ return peer;
+
+ return NULL;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
+ int node_id, int generation)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->node_id == node_id &&
+ peer->generation == generation)
+ return peer;
+
+ return NULL;
+}
+
+/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
+static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
+{
+ max_rec = min(max_rec, speed + 8);
+ max_rec = min(max_rec, 0xbU); /* <= 4096 */
+ if (max_rec < 8) {
+ fw_notify("max_rec %x out of range\n", max_rec);
+ max_rec = 8;
+ }
+
+ return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
+}
+
+
+static int fwnet_finish_incoming_packet(struct net_device *net,
+ struct sk_buff *skb, u16 source_node_id,
+ bool is_broadcast, u16 ether_type)
+{
+ struct fwnet_device *dev;
+ static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
+ int status;
+ __be64 guid;
+
+ dev = netdev_priv(net);
+ /* Write metadata, and then pass to the receive level */
+ skb->dev = net;
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+
+ /*
+ * Parse the encapsulation header. This actually does the job of
+ * converting to an ethernet frame header, as well as arp
+ * conversion if needed. ARP conversion is easier in this
+ * direction, since we are using ethernet as our backend.
+ */
+ /*
+ * If this is an ARP packet, convert it. First, we want to make
+ * use of some of the fields, since they tell us a little bit
+ * about the sending machine.
+ */
+ if (ether_type == ETH_P_ARP) {
+ struct rfc2734_arp *arp1394;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ u64 fifo_addr;
+ u64 peer_guid;
+ unsigned sspd;
+ u16 max_payload;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ arp1394 = (struct rfc2734_arp *)skb->data;
+ arp = (struct arphdr *)skb->data;
+ arp_ptr = (unsigned char *)(arp + 1);
+ peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
+ fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
+ | get_unaligned_be32(&arp1394->fifo_lo);
+
+ sspd = arp1394->sspd;
+ /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
+ if (sspd > SCODE_3200) {
+ fw_notify("sspd %x out of range\n", sspd);
+ sspd = SCODE_3200;
+ }
+ max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ peer = fwnet_peer_find_by_guid(dev, peer_guid);
+ if (peer) {
+ peer->fifo = fifo_addr;
+
+ if (peer->speed > sspd)
+ peer->speed = sspd;
+ if (peer->max_payload > max_payload)
+ peer->max_payload = max_payload;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!peer) {
+ fw_notify("No peer for ARP packet from %016llx\n",
+ (unsigned long long)peer_guid);
+ goto failed_proto;
+ }
+
+ /*
+ * Now that we're done with the 1394 specific stuff, we'll
+ * need to alter some of the data. Believe it or not, all
+ * that needs to be done is that sender_IP_address is moved, the
+ * destination hardware address gets stuffed in, and the hardware
+ * address length is set to 8.
+ *
+ * IMPORTANT: The code below overwrites 1394 specific data
+ * needed above so keep the munging of the data for the
+ * higher level IP stack last.
+ */
+
+ arp->ar_hln = 8;
+ /* skip over sender unique id */
+ arp_ptr += arp->ar_hln;
+ /* move sender IP addr */
+ put_unaligned(arp1394->sip, (u32 *)arp_ptr);
+ /* skip over sender IP addr */
+ arp_ptr += arp->ar_pln;
+
+ if (arp->ar_op == htons(ARPOP_REQUEST))
+ memset(arp_ptr, 0, sizeof(u64));
+ else
+ memcpy(arp_ptr, net->dev_addr, sizeof(u64));
+ }
+
+ /* Now add the ethernet header. */
+ guid = cpu_to_be64(dev->card->guid);
+ if (dev_hard_header(skb, net, ether_type,
+ is_broadcast ? &broadcast_hw : &guid,
+ NULL, skb->len) >= 0) {
+ struct fwnet_header *eth;
+ u16 *rawp;
+ __be16 protocol;
+
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = (struct fwnet_header *)skb_mac_header(skb);
+ if (*eth->h_dest & 1) {
+ if (memcmp(eth->h_dest, net->broadcast,
+ net->addr_len) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+#if 0
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+#endif
+ } else {
+ if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ if (ntohs(eth->h_proto) >= 1536) {
+ protocol = eth->h_proto;
+ } else {
+ rawp = (u16 *)skb->data;
+ if (*rawp == 0xffff)
+ protocol = htons(ETH_P_802_3);
+ else
+ protocol = htons(ETH_P_802_2);
+ }
+ skb->protocol = protocol;
+ }
+ status = netif_rx(skb);
+ if (status == NET_RX_DROP) {
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+ } else {
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += skb->len;
+ }
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+
+ failed_proto:
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ net->last_rx = jiffies;
+
+ return 0;
+}
+
+static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ int source_node_id, int generation,
+ bool is_broadcast)
+{
+ struct sk_buff *skb;
+ struct net_device *net = dev->netdev;
+ struct rfc2734_header hdr;
+ unsigned lf;
+ unsigned long flags;
+ struct fwnet_peer *peer;
+ struct fwnet_partial_datagram *pd;
+ int fg_off;
+ int dg_size;
+ u16 datagram_label;
+ int retval;
+ u16 ether_type;
+
+ hdr.w0 = be32_to_cpu(buf[0]);
+ lf = fwnet_get_hdr_lf(&hdr);
+ if (lf == RFC2374_HDR_UNFRAG) {
+ /*
+ * An unfragmented datagram has been received by the ieee1394
+ * bus. Build an skbuff around it so we can pass it to the
+ * high level network layer.
+ */
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ buf++;
+ len -= RFC2374_UNFRAG_HDR_SIZE;
+
+ skb = dev_alloc_skb(len + net->hard_header_len + 15);
+ if (unlikely(!skb)) {
+ fw_error("out of memory\n");
+ net->stats.rx_dropped++;
+
+ return -1;
+ }
+ skb_reserve(skb, (net->hard_header_len + 15) & ~15);
+ memcpy(skb_put(skb, len), buf, len);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ is_broadcast, ether_type);
+ }
+ /* A datagram fragment has been received, now the fun begins. */
+ hdr.w1 = ntohl(buf[1]);
+ buf += 2;
+ len -= RFC2374_FRAG_HDR_SIZE;
+ if (lf == RFC2374_HDR_FIRSTFRAG) {
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ fg_off = 0;
+ } else {
+ ether_type = 0;
+ fg_off = fwnet_get_hdr_fg_off(&hdr);
+ }
+ datagram_label = fwnet_get_hdr_dgl(&hdr);
+ dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
+ if (!peer)
+ goto bad_proto;
+
+ pd = fwnet_pd_find(peer, datagram_label);
+ if (pd == NULL) {
+ while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
+ /* remove the oldest */
+ fwnet_pd_delete(list_first_entry(&peer->pd_list,
+ struct fwnet_partial_datagram, pd_link));
+ peer->pdg_size--;
+ }
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ goto bad_proto;
+ }
+ peer->pdg_size++;
+ } else {
+ if (fwnet_frag_overlap(pd, fg_off, len) ||
+ pd->datagram_size != dg_size) {
+ /*
+ * Differing datagram sizes or overlapping fragments,
+ * discard old datagram and start a new one.
+ */
+ fwnet_pd_delete(pd);
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ } else {
+ if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
+ /*
+ * Couldn't save off fragment anyway
+ * so might as well obliterate the
+ * datagram now.
+ */
+ fwnet_pd_delete(pd);
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ }
+ } /* new datagram or add to existing one */
+
+ if (lf == RFC2374_HDR_FIRSTFRAG)
+ pd->ether_type = ether_type;
+
+ if (fwnet_pd_is_complete(pd)) {
+ ether_type = pd->ether_type;
+ peer->pdg_size--;
+ skb = skb_get(pd->skb);
+ fwnet_pd_delete(pd);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ false, ether_type);
+ }
+ /*
+ * Datagram is not complete, we're done for the
+ * moment.
+ */
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+
+ bad_proto:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+}
+
+static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
+ int tcode, int destination, int source, int generation,
+ int speed, unsigned long long offset, void *payload,
+ size_t length, void *callback_data)
+{
+ struct fwnet_device *dev = callback_data;
+ int rcode;
+
+ if (destination == IEEE1394_ALL_NODES) {
+ kfree(r);
+
+ return;
+ }
+
+ if (offset != dev->handler.offset)
+ rcode = RCODE_ADDRESS_ERROR;
+ else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
+ rcode = RCODE_TYPE_ERROR;
+ else if (fwnet_incoming_packet(dev, payload, length,
+ source, generation, false) != 0) {
+ fw_error("Incoming packet failure\n");
+ rcode = RCODE_CONFLICT_ERROR;
+ } else
+ rcode = RCODE_COMPLETE;
+
+ fw_send_response(card, r, rcode);
+}
+
+static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ u32 cycle, size_t header_length, void *header, void *data)
+{
+ struct fwnet_device *dev;
+ struct fw_iso_packet packet;
+ struct fw_card *card;
+ __be16 *hdr_ptr;
+ __be32 *buf_ptr;
+ int retval;
+ u32 length;
+ u16 source_node_id;
+ u32 specifier_id;
+ u32 ver;
+ unsigned long offset;
+ unsigned long flags;
+
+ dev = data;
+ card = dev->card;
+ hdr_ptr = header;
+ length = be16_to_cpup(hdr_ptr);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
+ buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
+ if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
+ dev->broadcast_rcv_next_ptr = 0;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
+ | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
+ ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
+ source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
+
+ if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ buf_ptr += 2;
+ length -= IEEE1394_GASP_HDR_SIZE;
+ fwnet_incoming_packet(dev, buf_ptr, length,
+ source_node_id, -1, true);
+ }
+
+ packet.payload_length = dev->rcv_buffer_size;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (retval < 0)
+ fw_error("requeue failed\n");
+}
+
+static struct kmem_cache *fwnet_packet_task_cache;
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask);
+
+static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned long flags;
+
+ dev = ptask->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&ptask->pt_link);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->outstanding_pkts--; /* FIXME access inside lock */
+
+ if (ptask->outstanding_pkts > 0) {
+ u16 dg_size;
+ u16 fg_off;
+ u16 datagram_label;
+ u16 lf;
+ struct sk_buff *skb;
+
+ /* Update the ptask to point to the next fragment and send it */
+ lf = fwnet_get_hdr_lf(&ptask->hdr);
+ switch (lf) {
+ case RFC2374_HDR_LASTFRAG:
+ case RFC2374_HDR_UNFRAG:
+ default:
+ fw_error("Outstanding packet %x lf %x, header %x,%x\n",
+ ptask->outstanding_pkts, lf, ptask->hdr.w0,
+ ptask->hdr.w1);
+ BUG();
+
+ case RFC2374_HDR_FIRSTFRAG:
+ /* Set frag type here for future interior fragments */
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+
+ case RFC2374_HDR_INTFRAG:
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
+ + ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+ }
+ skb = ptask->skb;
+ skb_pull(skb, ptask->max_payload);
+ if (ptask->outstanding_pkts > 1) {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
+ dg_size, fg_off, datagram_label);
+ } else {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
+ dg_size, fg_off, datagram_label);
+ ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
+ }
+ fwnet_send_packet(ptask);
+ } else {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+}
+
+static void fwnet_write_complete(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct fwnet_packet_task *ptask;
+
+ ptask = data;
+
+ if (rcode == RCODE_COMPLETE)
+ fwnet_transmit_packet_done(ptask);
+ else
+ fw_error("fwnet_write_complete: failed: %x\n", rcode);
+ /* ??? error recovery */
+}
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned tx_len;
+ struct rfc2734_header *bufhdr;
+ unsigned long flags;
+
+ dev = ptask->dev;
+ tx_len = ptask->max_payload;
+ switch (fwnet_get_hdr_lf(&ptask->hdr)) {
+ case RFC2374_HDR_UNFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ break;
+
+ case RFC2374_HDR_FIRSTFRAG:
+ case RFC2374_HDR_INTFRAG:
+ case RFC2374_HDR_LASTFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
+ break;
+
+ default:
+ BUG();
+ }
+ if (ptask->dest_node == IEEE1394_ALL_NODES) {
+ u8 *p;
+ int generation;
+ int node_id;
+
+ /* ptask->generation may not have been set yet */
+ generation = dev->card->generation;
+ smp_rmb();
+ node_id = dev->card->node_id;
+
+ p = skb_push(ptask->skb, 8);
+ put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
+ put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
+ | RFC2734_SW_VERSION, &p[4]);
+
+ /* We should not transmit if broadcast_channel.valid == 0. */
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_STREAM_DATA,
+ fw_stream_packet_destination_id(3,
+ IEEE1394_BROADCAST_CHANNEL, 0),
+ generation, SCODE_100, 0ULL, ptask->skb->data,
+ tx_len + 8, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+ }
+
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
+ ptask->generation, ptask->speed, ptask->fifo_addr,
+ ptask->skb->data, tx_len, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->sent_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev->netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+static int fwnet_broadcast_start(struct fwnet_device *dev)
+{
+ struct fw_iso_context *context;
+ int retval;
+ unsigned num_packets;
+ unsigned max_receive;
+ struct fw_iso_packet packet;
+ unsigned long offset;
+ unsigned u;
+
+ if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
+ /* outside OHCI posted write area? */
+ static const struct fw_address_region region = {
+ .start = 0xffff00000000ULL,
+ .end = CSR_REGISTER_BASE,
+ };
+
+ dev->handler.length = 4096;
+ dev->handler.address_callback = fwnet_receive_packet;
+ dev->handler.callback_data = dev;
+
+ retval = fw_core_add_address_handler(&dev->handler, &region);
+ if (retval < 0)
+ goto failed_initial;
+
+ dev->local_fifo = dev->handler.offset;
+ }
+
+ max_receive = 1U << (dev->card->max_receive + 1);
+ num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
+
+ if (!dev->broadcast_rcv_context) {
+ void **ptrptr;
+
+ context = fw_iso_context_create(dev->card,
+ FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
+ dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
+ if (IS_ERR(context)) {
+ retval = PTR_ERR(context);
+ goto failed_context_create;
+ }
+
+ retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
+ dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+ if (retval < 0)
+ goto failed_buffer_init;
+
+ ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ if (!ptrptr) {
+ retval = -ENOMEM;
+ goto failed_ptrs_alloc;
+ }
+
+ dev->broadcast_rcv_buffer_ptrs = ptrptr;
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+ void *ptr;
+ unsigned v;
+
+ ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+ for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+ *ptrptr++ = (void *)
+ ((char *)ptr + v * max_receive);
+ }
+ dev->broadcast_rcv_context = context;
+ } else {
+ context = dev->broadcast_rcv_context;
+ }
+
+ packet.payload_length = max_receive;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+ offset = 0;
+
+ for (u = 0; u < num_packets; u++) {
+ retval = fw_iso_context_queue(context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ offset += max_receive;
+ }
+ dev->num_broadcast_rcv_ptrs = num_packets;
+ dev->rcv_buffer_size = max_receive;
+ dev->broadcast_rcv_next_ptr = 0U;
+ retval = fw_iso_context_start(context, -1, 0,
+ FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ /* FIXME: adjust it according to the min. speed of all known peers? */
+ dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
+ - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
+ dev->broadcast_state = FWNET_BROADCAST_RUNNING;
+
+ return 0;
+
+ failed_rcv_queue:
+ kfree(dev->broadcast_rcv_buffer_ptrs);
+ dev->broadcast_rcv_buffer_ptrs = NULL;
+ failed_ptrs_alloc:
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+ failed_buffer_init:
+ fw_iso_context_destroy(context);
+ dev->broadcast_rcv_context = NULL;
+ failed_context_create:
+ fw_core_remove_address_handler(&dev->handler);
+ failed_initial:
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ return retval;
+}
+
+/* ifup */
+static int fwnet_open(struct net_device *net)
+{
+ struct fwnet_device *dev = netdev_priv(net);
+ int ret;
+
+ if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
+ ret = fwnet_broadcast_start(dev);
+ if (ret)
+ return ret;
+ }
+ netif_start_queue(net);
+
+ return 0;
+}
+
+/* ifdown */
+static int fwnet_stop(struct net_device *net)
+{
+ netif_stop_queue(net);
+
+ /* Deallocate iso context for use by other applications? */
+
+ return 0;
+}
+
+static int fwnet_tx(struct sk_buff *skb, struct net_device *net)
+{
+ struct fwnet_header hdr_buf;
+ struct fwnet_device *dev = netdev_priv(net);
+ __be16 proto;
+ u16 dest_node;
+ unsigned max_payload;
+ u16 dg_size;
+ u16 *datagram_label_ptr;
+ struct fwnet_packet_task *ptask;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
+ if (ptask == NULL)
+ goto fail;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ /*
+ * Make a copy of the driver-specific header.
+ * We might need to rebuild the header on tx failure.
+ */
+ memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
+ skb_pull(skb, sizeof(hdr_buf));
+
+ proto = hdr_buf.h_proto;
+ dg_size = skb->len;
+
+ /* serialize access to peer, including peer->datagram_label */
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /*
+ * Set the transmission type for the packet. ARP packets and IP
+ * broadcast packets are sent via GASP.
+ */
+ if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
+ || proto == htons(ETH_P_ARP)
+ || (proto == htons(ETH_P_IP)
+ && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
+ max_payload = dev->broadcast_xmt_max_payload;
+ datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
+
+ ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
+ ptask->generation = 0;
+ ptask->dest_node = IEEE1394_ALL_NODES;
+ ptask->speed = SCODE_100;
+ } else {
+ __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
+ u8 generation;
+
+ peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
+ if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
+ goto fail_unlock;
+
+ generation = peer->generation;
+ dest_node = peer->node_id;
+ max_payload = peer->max_payload;
+ datagram_label_ptr = &peer->datagram_label;
+
+ ptask->fifo_addr = peer->fifo;
+ ptask->generation = generation;
+ ptask->dest_node = dest_node;
+ ptask->speed = peer->speed;
+ }
+
+ /* If this is an ARP packet, convert it */
+ if (proto == htons(ETH_P_ARP)) {
+ struct arphdr *arp = (struct arphdr *)skb->data;
+ unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+ struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
+ __be32 ipaddr;
+
+ ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
+
+ arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
+ arp1394->max_rec = dev->card->max_receive;
+ arp1394->sspd = dev->card->link_speed;
+
+ put_unaligned_be16(dev->local_fifo >> 32,
+ &arp1394->fifo_hi);
+ put_unaligned_be32(dev->local_fifo & 0xffffffff,
+ &arp1394->fifo_lo);
+ put_unaligned(ipaddr, &arp1394->sip);
+ }
+
+ ptask->hdr.w0 = 0;
+ ptask->hdr.w1 = 0;
+ ptask->skb = skb;
+ ptask->dev = dev;
+
+ /* Does it all fit in one packet? */
+ if (dg_size <= max_payload) {
+ fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
+ ptask->outstanding_pkts = 1;
+ max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
+ } else {
+ u16 datagram_label;
+
+ max_payload -= RFC2374_FRAG_OVERHEAD;
+ datagram_label = (*datagram_label_ptr)++;
+ fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
+ datagram_label);
+ ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
+ max_payload += RFC2374_FRAG_HDR_SIZE;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->max_payload = max_payload;
+ fwnet_send_packet(ptask);
+
+ return NETDEV_TX_OK;
+
+ fail_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ fail:
+ if (ptask)
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+
+ if (skb != NULL)
+ dev_kfree_skb(skb);
+
+ net->stats.tx_dropped++;
+ net->stats.tx_errors++;
+
+ /*
+ * FIXME: According to a patch from 2003-02-26, "returning non-zero
+ * causes serious problems" here, allegedly. Before that patch,
+ * -ERRNO was returned which is not appropriate under Linux 2.6.
+ * Perhaps more needs to be done? Stop the queue in serious
+ * conditions and restart it elsewhere?
+ */
+ return NETDEV_TX_OK;
+}
+
+static int fwnet_change_mtu(struct net_device *net, int new_mtu)
+{
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ net->mtu = new_mtu;
+ return 0;
+}
+
+static void fwnet_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->bus_info, "ieee1394");
+}
+
+static struct ethtool_ops fwnet_ethtool_ops = {
+ .get_drvinfo = fwnet_get_drvinfo,
+};
+
+static const struct net_device_ops fwnet_netdev_ops = {
+ .ndo_open = fwnet_open,
+ .ndo_stop = fwnet_stop,
+ .ndo_start_xmit = fwnet_tx,
+ .ndo_change_mtu = fwnet_change_mtu,
+};
+
+static void fwnet_init_dev(struct net_device *net)
+{
+ net->header_ops = &fwnet_header_ops;
+ net->netdev_ops = &fwnet_netdev_ops;
+ net->watchdog_timeo = 2 * HZ;
+ net->flags = IFF_BROADCAST | IFF_MULTICAST;
+ net->features = NETIF_F_HIGHDMA;
+ net->addr_len = FWNET_ALEN;
+ net->hard_header_len = FWNET_HLEN;
+ net->type = ARPHRD_IEEE1394;
+ net->tx_queue_len = 10;
+ SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
+}
+
+/* caller must hold fwnet_device_mutex */
+static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
+{
+ struct fwnet_device *dev;
+
+ list_for_each_entry(dev, &fwnet_device_list, dev_link)
+ if (dev->card == card)
+ return dev;
+
+ return NULL;
+}
+
+static int fwnet_add_peer(struct fwnet_device *dev,
+ struct fw_unit *unit, struct fw_device *device)
+{
+ struct fwnet_peer *peer;
+
+ peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ dev_set_drvdata(&unit->device, peer);
+
+ peer->dev = dev;
+ peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
+ peer->fifo = FWNET_NO_FIFO_ADDR;
+ INIT_LIST_HEAD(&peer->pd_list);
+ peer->pdg_size = 0;
+ peer->datagram_label = 0;
+ peer->speed = device->max_speed;
+ peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
+
+ peer->generation = device->generation;
+ smp_rmb();
+ peer->node_id = device->node_id;
+
+ spin_lock_irq(&dev->lock);
+ list_add_tail(&peer->peer_link, &dev->peer_list);
+ spin_unlock_irq(&dev->lock);
+
+ return 0;
+}
+
+static int fwnet_probe(struct device *_dev)
+{
+ struct fw_unit *unit = fw_unit(_dev);
+ struct fw_device *device = fw_parent_device(unit);
+ struct fw_card *card = device->card;
+ struct net_device *net;
+ bool allocated_netdev = false;
+ struct fwnet_device *dev;
+ unsigned max_mtu;
+ int ret;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ dev = fwnet_dev_find(card);
+ if (dev) {
+ net = dev->netdev;
+ goto have_dev;
+ }
+
+ net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
+ if (net == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ allocated_netdev = true;
+ SET_NETDEV_DEV(net, card->device);
+ dev = netdev_priv(net);
+
+ spin_lock_init(&dev->lock);
+ dev->broadcast_state = FWNET_BROADCAST_ERROR;
+ dev->broadcast_rcv_context = NULL;
+ dev->broadcast_xmt_max_payload = 0;
+ dev->broadcast_xmt_datagramlabel = 0;
+
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ INIT_LIST_HEAD(&dev->packet_list);
+ INIT_LIST_HEAD(&dev->broadcasted_list);
+ INIT_LIST_HEAD(&dev->sent_list);
+ INIT_LIST_HEAD(&dev->peer_list);
+
+ dev->card = card;
+ dev->netdev = net;
+
+ /*
+ * Use the RFC 2734 default 1500 octets or the maximum payload
+ * as initial MTU
+ */
+ max_mtu = (1 << (card->max_receive + 1))
+ - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
+ net->mtu = min(1500U, max_mtu);
+
+ /* Set our hardware address while we're at it */
+ put_unaligned_be64(card->guid, net->dev_addr);
+ put_unaligned_be64(~0ULL, net->broadcast);
+ ret = register_netdev(net);
+ if (ret) {
+ fw_error("Cannot register the driver\n");
+ goto out;
+ }
+
+ list_add_tail(&dev->dev_link, &fwnet_device_list);
+ fw_notify("%s: IPv4 over FireWire on device %016llx\n",
+ net->name, (unsigned long long)card->guid);
+ have_dev:
+ ret = fwnet_add_peer(dev, unit, device);
+ if (ret && allocated_netdev) {
+ unregister_netdev(net);
+ list_del(&dev->dev_link);
+ }
+ out:
+ if (ret && allocated_netdev)
+ free_netdev(net);
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return ret;
+}
+
+static void fwnet_remove_peer(struct fwnet_peer *peer)
+{
+ struct fwnet_partial_datagram *pd, *pd_next;
+
+ spin_lock_irq(&peer->dev->lock);
+ list_del(&peer->peer_link);
+ spin_unlock_irq(&peer->dev->lock);
+
+ list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
+ fwnet_pd_delete(pd);
+
+ kfree(peer);
+}
+
+static int fwnet_remove(struct device *_dev)
+{
+ struct fwnet_peer *peer = dev_get_drvdata(_dev);
+ struct fwnet_device *dev = peer->dev;
+ struct net_device *net;
+ struct fwnet_packet_task *ptask, *pt_next;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ fwnet_remove_peer(peer);
+
+ if (list_empty(&dev->peer_list)) {
+ net = dev->netdev;
+ unregister_netdev(net);
+
+ if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+ fw_core_remove_address_handler(&dev->handler);
+ if (dev->broadcast_rcv_context) {
+ fw_iso_context_stop(dev->broadcast_rcv_context);
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
+ dev->card);
+ fw_iso_context_destroy(dev->broadcast_rcv_context);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->packet_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->broadcasted_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->sent_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_del(&dev->dev_link);
+
+ free_netdev(net);
+ }
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return 0;
+}
+
+/*
+ * FIXME abort partially sent fragmented datagrams,
+ * discard partially received fragmented datagrams
+ */
+static void fwnet_update(struct fw_unit *unit)
+{
+ struct fw_device *device = fw_parent_device(unit);
+ struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
+ int generation;
+
+ generation = device->generation;
+
+ spin_lock_irq(&peer->dev->lock);
+ peer->node_id = device->node_id;
+ peer->generation = generation;
+ spin_unlock_irq(&peer->dev->lock);
+}
+
+static const struct ieee1394_device_id fwnet_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .specifier_id = IANA_SPECIFIER_ID,
+ .version = RFC2734_SW_VERSION,
+ },
+ { }
+};
+
+static struct fw_driver fwnet_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "net",
+ .bus = &fw_bus_type,
+ .probe = fwnet_probe,
+ .remove = fwnet_remove,
+ },
+ .update = fwnet_update,
+ .id_table = fwnet_id_table,
+};
+
+static const u32 rfc2374_unit_directory_data[] = {
+ 0x00040000, /* directory_length */
+ 0x1200005e, /* unit_specifier_id: IANA */
+ 0x81000003, /* textual descriptor offset */
+ 0x13000001, /* unit_sw_version: RFC 2734 */
+ 0x81000005, /* textual descriptor offset */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49414e41, /* I A N A */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49507634, /* I P v 4 */
+};
+
+static struct fw_descriptor rfc2374_unit_directory = {
+ .length = ARRAY_SIZE(rfc2374_unit_directory_data),
+ .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
+ .data = rfc2374_unit_directory_data
+};
+
+static int __init fwnet_init(void)
+{
+ int err;
+
+ err = fw_core_add_descriptor(&rfc2374_unit_directory);
+ if (err)
+ return err;
+
+ fwnet_packet_task_cache = kmem_cache_create("packet_task",
+ sizeof(struct fwnet_packet_task), 0, 0, NULL);
+ if (!fwnet_packet_task_cache) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = driver_register(&fwnet_driver.driver);
+ if (!err)
+ return 0;
+
+ kmem_cache_destroy(fwnet_packet_task_cache);
+out:
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+
+ return err;
+}
+module_init(fwnet_init);
+
+static void __exit fwnet_cleanup(void)
+{
+ driver_unregister(&fwnet_driver.driver);
+ kmem_cache_destroy(fwnet_packet_task_cache);
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+}
+module_exit(fwnet_cleanup);
+
+MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
+MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
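
A note on the transmit path in net.c above: fwnet_tx() either sends the whole datagram unfragmented or splits it, and the fragment count is simply a rounded-up division of the datagram size by the per-fragment payload once the fragment-header overhead has been subtracted (fwnet_transmit_packet_done() then walks the remaining fragments). The sketch below restates that arithmetic in isolation; example_fragment_count(), its frag_overhead parameter, and the sample numbers in the comment are illustrative assumptions, not values lifted from the driver.

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Minimal sketch of the RFC 2734 fragmentation arithmetic. */
static unsigned example_fragment_count(unsigned dg_size, unsigned max_payload,
				       unsigned frag_overhead)
{
	/* payload bytes left per fragment after its larger header */
	unsigned per_fragment = max_payload - frag_overhead;

	/* e.g. dg_size = 3000, per_fragment = 1024 -> 3 fragments */
	return DIV_ROUND_UP(dg_size, per_fragment);
}
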
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 24c45635376a..8d51568ee143 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -201,6 +201,12 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
/*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE 16
+
+/*
* The default maximum s/g segment size of a FireWire controller is
* usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
* be quadlet-aligned, we set the length limit to 0xffff & ~3.
@@ -312,7 +318,7 @@ struct sbp2_command_orb {
struct sbp2_pointer next;
struct sbp2_pointer data_descriptor;
__be32 misc;
- u8 command_block[12];
+ u8 command_block[SBP2_MAX_CDB_SIZE];
} request;
struct scsi_cmnd *cmd;
scsi_done_fn_t done;
@@ -1146,6 +1152,8 @@ static int sbp2_probe(struct device *dev)
if (fw_device_enable_phys_dma(device) < 0)
goto fail_shost_put;
+ shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
+
if (scsi_add_host(shost, &unit->device) < 0)
goto fail_shost_put;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3582c39f9725..96dda81c9228 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -79,6 +79,12 @@ config GPIO_XILINX
help
Say yes here to support the Xilinx FPGA GPIO device
+config GPIO_VR41XX
+	tristate "NEC VR4100 series General-purpose I/O Unit support"
+ depends on CPU_VR41XX
+ help
+	  Say yes here to support the NEC VR4100 series General-purpose I/O Unit.
+
comment "I2C GPIO expanders:"
config GPIO_MAX732X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ef90203e8f3c..9244c6fcd8be 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_GPIO_PL061) += pl061.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
+obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index aa8e7cb020d9..4ee4c8367a3f 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
writeb(!!value << offset, chip->base + (1 << (offset + 2)));
}
+static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+
+ if (chip->irq_base == (unsigned) -1)
+ return -EINVAL;
+
+ return chip->irq_base + offset;
+}
+
/*
* PL061 GPIO IRQ
*/
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->chip->ack(irq);
list_for_each(ptr, chip_list) {
unsigned long pending;
- int gpio;
+ int offset;
chip = list_entry(ptr, struct pl061_gpio, list);
pending = readb(chip->base + GPIOMIS);
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
if (pending == 0)
continue;
- for_each_bit(gpio, &pending, PL061_GPIO_NR)
- generic_handle_irq(gpio_to_irq(gpio));
+ for_each_bit(offset, &pending, PL061_GPIO_NR)
+ generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->chip->unmask(irq);
}
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
struct pl061_gpio *chip;
struct list_head *chip_list;
int ret, irq, i;
- static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)];
+ static DECLARE_BITMAP(init_irq, NR_IRQS);
pdata = dev->dev.platform_data;
if (pdata == NULL)
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
chip->gc.direction_output = pl061_direction_output;
chip->gc.get = pl061_get_value;
chip->gc.set = pl061_set_value;
+ chip->gc.to_irq = pl061_to_irq;
chip->gc.base = pdata->gpio_base;
chip->gc.ngpio = PL061_GPIO_NR;
chip->gc.label = dev_name(&dev->dev);
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
if (chip_list == NULL) {
+ clear_bit(irq, init_irq);
ret = -ENOMEM;
goto iounmap;
}
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
new file mode 100644
index 000000000000..b70e06133e78
--- /dev/null
+++ b/drivers/gpio/vr41xx_giu.c
@@ -0,0 +1,586 @@
+/*
+ * Driver for NEC VR4100 series General-purpose I/O Unit.
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/vr41xx/giu.h>
+#include <asm/vr41xx/irq.h>
+#include <asm/vr41xx/vr41xx.h>
+
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
+MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
+MODULE_LICENSE("GPL");
+
+#define GIUIOSELL 0x00
+#define GIUIOSELH 0x02
+#define GIUPIODL 0x04
+#define GIUPIODH 0x06
+#define GIUINTSTATL 0x08
+#define GIUINTSTATH 0x0a
+#define GIUINTENL 0x0c
+#define GIUINTENH 0x0e
+#define GIUINTTYPL 0x10
+#define GIUINTTYPH 0x12
+#define GIUINTALSELL 0x14
+#define GIUINTALSELH 0x16
+#define GIUINTHTSELL 0x18
+#define GIUINTHTSELH 0x1a
+#define GIUPODATL 0x1c
+#define GIUPODATEN 0x1c
+#define GIUPODATH 0x1e
+ #define PIOEN0 0x0100
+ #define PIOEN1 0x0200
+#define GIUPODAT 0x1e
+#define GIUFEDGEINHL 0x20
+#define GIUFEDGEINHH 0x22
+#define GIUREDGEINHL 0x24
+#define GIUREDGEINHH 0x26
+
+#define GIUUSEUPDN 0x1e0
+#define GIUTERMUPDN 0x1e2
+
+#define GPIO_HAS_PULLUPDOWN_IO 0x0001
+#define GPIO_HAS_OUTPUT_ENABLE 0x0002
+#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
+
+enum {
+ GPIO_INPUT,
+ GPIO_OUTPUT,
+};
+
+static DEFINE_SPINLOCK(giu_lock);
+static unsigned long giu_flags;
+
+static void __iomem *giu_base;
+
+#define giu_read(offset) readw(giu_base + (offset))
+#define giu_write(offset, value) writew((value), giu_base + (offset))
+
+#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
+#define GIUINT_HIGH_OFFSET 16
+#define GIUINT_HIGH_MAX 32
+
+static inline u16 giu_set(u16 offset, u16 set)
+{
+ u16 data;
+
+ data = giu_read(offset);
+ data |= set;
+ giu_write(offset, data);
+
+ return data;
+}
+
+static inline u16 giu_clear(u16 offset, u16 clear)
+{
+ u16 data;
+
+ data = giu_read(offset);
+ data &= ~clear;
+ giu_write(offset, data);
+
+ return data;
+}
+
+static void ack_giuint_low(unsigned int irq)
+{
+ giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
+}
+
+static void mask_giuint_low(unsigned int irq)
+{
+ giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+}
+
+static void mask_ack_giuint_low(unsigned int irq)
+{
+ unsigned int pin;
+
+ pin = GPIO_PIN_OF_IRQ(irq);
+ giu_clear(GIUINTENL, 1 << pin);
+ giu_write(GIUINTSTATL, 1 << pin);
+}
+
+static void unmask_giuint_low(unsigned int irq)
+{
+ giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+}
+
+static struct irq_chip giuint_low_irq_chip = {
+ .name = "GIUINTL",
+ .ack = ack_giuint_low,
+ .mask = mask_giuint_low,
+ .mask_ack = mask_ack_giuint_low,
+ .unmask = unmask_giuint_low,
+};
+
+static void ack_giuint_high(unsigned int irq)
+{
+ giu_write(GIUINTSTATH,
+ 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+}
+
+static void mask_giuint_high(unsigned int irq)
+{
+ giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+}
+
+static void mask_ack_giuint_high(unsigned int irq)
+{
+ unsigned int pin;
+
+ pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
+ giu_clear(GIUINTENH, 1 << pin);
+ giu_write(GIUINTSTATH, 1 << pin);
+}
+
+static void unmask_giuint_high(unsigned int irq)
+{
+ giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+}
+
+static struct irq_chip giuint_high_irq_chip = {
+ .name = "GIUINTH",
+ .ack = ack_giuint_high,
+ .mask = mask_giuint_high,
+ .mask_ack = mask_ack_giuint_high,
+ .unmask = unmask_giuint_high,
+};
+
+static int giu_get_irq(unsigned int irq)
+{
+ u16 pendl, pendh, maskl, maskh;
+ int i;
+
+ pendl = giu_read(GIUINTSTATL);
+ pendh = giu_read(GIUINTSTATH);
+ maskl = giu_read(GIUINTENL);
+ maskh = giu_read(GIUINTENH);
+
+ maskl &= pendl;
+ maskh &= pendh;
+
+ if (maskl) {
+ for (i = 0; i < 16; i++) {
+ if (maskl & (1 << i))
+ return GIU_IRQ(i);
+ }
+ } else if (maskh) {
+ for (i = 0; i < 16; i++) {
+ if (maskh & (1 << i))
+ return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
+ }
+ }
+
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ maskl, pendl, maskh, pendh);
+
+ atomic_inc(&irq_err_count);
+
+ return -EINVAL;
+}
+
+void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
+ irq_signal_t signal)
+{
+ u16 mask;
+
+ if (pin < GIUINT_HIGH_OFFSET) {
+ mask = 1 << pin;
+ if (trigger != IRQ_TRIGGER_LEVEL) {
+ giu_set(GIUINTTYPL, mask);
+ if (signal == IRQ_SIGNAL_HOLD)
+ giu_set(GIUINTHTSELL, mask);
+ else
+ giu_clear(GIUINTHTSELL, mask);
+ if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
+ switch (trigger) {
+ case IRQ_TRIGGER_EDGE_FALLING:
+ giu_set(GIUFEDGEINHL, mask);
+ giu_clear(GIUREDGEINHL, mask);
+ break;
+ case IRQ_TRIGGER_EDGE_RISING:
+ giu_clear(GIUFEDGEINHL, mask);
+ giu_set(GIUREDGEINHL, mask);
+ break;
+ default:
+ giu_set(GIUFEDGEINHL, mask);
+ giu_set(GIUREDGEINHL, mask);
+ break;
+ }
+ }
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_low_irq_chip,
+ handle_edge_irq);
+ } else {
+ giu_clear(GIUINTTYPL, mask);
+ giu_clear(GIUINTHTSELL, mask);
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_low_irq_chip,
+ handle_level_irq);
+ }
+ giu_write(GIUINTSTATL, mask);
+ } else if (pin < GIUINT_HIGH_MAX) {
+ mask = 1 << (pin - GIUINT_HIGH_OFFSET);
+ if (trigger != IRQ_TRIGGER_LEVEL) {
+ giu_set(GIUINTTYPH, mask);
+ if (signal == IRQ_SIGNAL_HOLD)
+ giu_set(GIUINTHTSELH, mask);
+ else
+ giu_clear(GIUINTHTSELH, mask);
+ if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
+ switch (trigger) {
+ case IRQ_TRIGGER_EDGE_FALLING:
+ giu_set(GIUFEDGEINHH, mask);
+ giu_clear(GIUREDGEINHH, mask);
+ break;
+ case IRQ_TRIGGER_EDGE_RISING:
+ giu_clear(GIUFEDGEINHH, mask);
+ giu_set(GIUREDGEINHH, mask);
+ break;
+ default:
+ giu_set(GIUFEDGEINHH, mask);
+ giu_set(GIUREDGEINHH, mask);
+ break;
+ }
+ }
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_high_irq_chip,
+ handle_edge_irq);
+ } else {
+ giu_clear(GIUINTTYPH, mask);
+ giu_clear(GIUINTHTSELH, mask);
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_high_irq_chip,
+ handle_level_irq);
+ }
+ giu_write(GIUINTSTATH, mask);
+ }
+}
+EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
+
+void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
+{
+ u16 mask;
+
+ if (pin < GIUINT_HIGH_OFFSET) {
+ mask = 1 << pin;
+ if (level == IRQ_LEVEL_HIGH)
+ giu_set(GIUINTALSELL, mask);
+ else
+ giu_clear(GIUINTALSELL, mask);
+ giu_write(GIUINTSTATL, mask);
+ } else if (pin < GIUINT_HIGH_MAX) {
+ mask = 1 << (pin - GIUINT_HIGH_OFFSET);
+ if (level == IRQ_LEVEL_HIGH)
+ giu_set(GIUINTALSELH, mask);
+ else
+ giu_clear(GIUINTALSELH, mask);
+ giu_write(GIUINTSTATH, mask);
+ }
+}
+EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
+
+static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
+{
+ u16 offset, mask, reg;
+ unsigned long flags;
+
+ if (pin >= chip->ngpio)
+ return -EINVAL;
+
+ if (pin < 16) {
+ offset = GIUIOSELL;
+ mask = 1 << pin;
+ } else if (pin < 32) {
+ offset = GIUIOSELH;
+ mask = 1 << (pin - 16);
+ } else {
+ if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
+ offset = GIUPODATEN;
+ mask = 1 << (pin - 32);
+ } else {
+ switch (pin) {
+ case 48:
+ offset = GIUPODATH;
+ mask = PIOEN0;
+ break;
+ case 49:
+ offset = GIUPODATH;
+ mask = PIOEN1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&giu_lock, flags);
+
+ reg = giu_read(offset);
+ if (dir == GPIO_OUTPUT)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ giu_write(offset, reg);
+
+ spin_unlock_irqrestore(&giu_lock, flags);
+
+ return 0;
+}
+
+int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
+{
+ u16 reg, mask;
+ unsigned long flags;
+
+ if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
+ return -EPERM;
+
+ if (pin >= 15)
+ return -EINVAL;
+
+ mask = 1 << pin;
+
+ spin_lock_irqsave(&giu_lock, flags);
+
+ if (pull == GPIO_PULL_UP || pull == GPIO_PULL_DOWN) {
+ reg = giu_read(GIUTERMUPDN);
+ if (pull == GPIO_PULL_UP)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ giu_write(GIUTERMUPDN, reg);
+
+ reg = giu_read(GIUUSEUPDN);
+ reg |= mask;
+ giu_write(GIUUSEUPDN, reg);
+ } else {
+ reg = giu_read(GIUUSEUPDN);
+ reg &= ~mask;
+ giu_write(GIUUSEUPDN, reg);
+ }
+
+ spin_unlock_irqrestore(&giu_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
+
+static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
+{
+ u16 reg, mask;
+
+ if (pin >= chip->ngpio)
+ return -EINVAL;
+
+ if (pin < 16) {
+ reg = giu_read(GIUPIODL);
+ mask = 1 << pin;
+ } else if (pin < 32) {
+ reg = giu_read(GIUPIODH);
+ mask = 1 << (pin - 16);
+ } else if (pin < 48) {
+ reg = giu_read(GIUPODATL);
+ mask = 1 << (pin - 32);
+ } else {
+ reg = giu_read(GIUPODATH);
+ mask = 1 << (pin - 48);
+ }
+
+ if (reg & mask)
+ return 1;
+
+ return 0;
+}
+
+static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ u16 offset, mask, reg;
+ unsigned long flags;
+
+ if (pin >= chip->ngpio)
+ return;
+
+ if (pin < 16) {
+ offset = GIUPIODL;
+ mask = 1 << pin;
+ } else if (pin < 32) {
+ offset = GIUPIODH;
+ mask = 1 << (pin - 16);
+ } else if (pin < 48) {
+ offset = GIUPODATL;
+ mask = 1 << (pin - 32);
+ } else {
+ offset = GIUPODATH;
+ mask = 1 << (pin - 48);
+ }
+
+ spin_lock_irqsave(&giu_lock, flags);
+
+ reg = giu_read(offset);
+ if (value)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ giu_write(offset, reg);
+
+ spin_unlock_irqrestore(&giu_lock, flags);
+}
+
+
+static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return giu_set_direction(chip, offset, GPIO_INPUT);
+}
+
+static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ vr41xx_gpio_set(chip, offset, value);
+
+ return giu_set_direction(chip, offset, GPIO_OUTPUT);
+}
+
+static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ return GIU_IRQ_BASE + offset;
+}
+
+static struct gpio_chip vr41xx_gpio_chip = {
+ .label = "vr41xx",
+ .owner = THIS_MODULE,
+ .direction_input = vr41xx_gpio_direction_input,
+ .get = vr41xx_gpio_get,
+ .direction_output = vr41xx_gpio_direction_output,
+ .set = vr41xx_gpio_set,
+ .to_irq = vr41xx_gpio_to_irq,
+};
+
+static int __devinit giu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ unsigned int trigger, i, pin;
+ struct irq_chip *chip;
+ int irq, retval;
+
+ switch (pdev->id) {
+ case GPIO_50PINS_PULLUPDOWN:
+ giu_flags = GPIO_HAS_PULLUPDOWN_IO;
+ vr41xx_gpio_chip.ngpio = 50;
+ break;
+ case GPIO_36PINS:
+ vr41xx_gpio_chip.ngpio = 36;
+ break;
+ case GPIO_48PINS_EDGE_SELECT:
+ giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
+ vr41xx_gpio_chip.ngpio = 48;
+ break;
+ default:
+ dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EBUSY;
+
+ giu_base = ioremap(res->start, res->end - res->start + 1);
+ if (!giu_base)
+ return -ENOMEM;
+
+ vr41xx_gpio_chip.dev = &pdev->dev;
+
+ retval = gpiochip_add(&vr41xx_gpio_chip);
+
+ giu_write(GIUINTENL, 0);
+ giu_write(GIUINTENH, 0);
+
+ trigger = giu_read(GIUINTTYPH) << 16;
+ trigger |= giu_read(GIUINTTYPL);
+ for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
+ pin = GPIO_PIN_OF_IRQ(i);
+ if (pin < GIUINT_HIGH_OFFSET)
+ chip = &giuint_low_irq_chip;
+ else
+ chip = &giuint_high_irq_chip;
+
+ if (trigger & (1 << pin))
+ set_irq_chip_and_handler(i, chip, handle_edge_irq);
+ else
+ set_irq_chip_and_handler(i, chip, handle_level_irq);
+
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq >= nr_irqs)
+ return -EBUSY;
+
+ return cascade_irq(irq, giu_get_irq);
+}
+
+static int __devexit giu_remove(struct platform_device *pdev)
+{
+ if (giu_base) {
+ iounmap(giu_base);
+ giu_base = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver giu_device_driver = {
+ .probe = giu_probe,
+ .remove = __devexit_p(giu_remove),
+ .driver = {
+ .name = "GIU",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init vr41xx_giu_init(void)
+{
+ return platform_driver_register(&giu_device_driver);
+}
+
+static void __exit vr41xx_giu_exit(void)
+{
+ platform_driver_unregister(&giu_device_driver);
+}
+
+module_init(vr41xx_giu_init);
+module_exit(vr41xx_giu_exit);
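
For orientation on the vr41xx_giu.c code above: the GIU exposes its pins through pairs of 16-bit registers, a low half for pins 0-15 and a high half for pins 16-31, so routines such as giu_set_direction() and vr41xx_gpio_get() always reduce a pin number to a register offset plus a single-bit mask. The helper below is a minimal sketch of that mapping under those assumptions; example_pin_to_reg() and its parameters are hypothetical names, not part of the driver.

#include <linux/types.h>

/* Sketch: reduce a GIU pin number to a 16-bit register offset and bit mask. */
static void example_pin_to_reg(unsigned int pin, u16 low_reg, u16 high_reg,
			       u16 *offset, u16 *mask)
{
	if (pin < 16) {
		*offset = low_reg;	/* e.g. GIUIOSELL */
		*mask = 1 << pin;
	} else {
		*offset = high_reg;	/* e.g. GIUIOSELH */
		*mask = 1 << (pin - 16);
	}
}
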
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c961fe415aef..39b393d38bb3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -81,6 +81,7 @@ config DRM_I830
config DRM_I915
tristate "i915 driver"
+ depends on AGP_INTEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4e89ab08b7b8..fe23f29f7cba 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
@@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
-obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a6f73f1e99d9..3da9cfa31848 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -1090,6 +1090,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
}
+ /* disable the unused connectors while restoring the modesetting */
+ drm_helper_disable_unused_functions(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7d0835226f6e..80cc6d06d61b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -294,10 +294,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
- unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo;
- unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf);
- unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
if (hactive < 64 || vactive < 64)
@@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
- mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
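
For context on the mask changes in drm_mode_detailed() above: as I read the EDID 1.3 detailed timing descriptor, the shared "hi" byte carries the hsync offset upper bits in bits 7-6, the hsync pulse width upper bits in bits 5-4, the vsync offset upper bits in bits 3-2 and the vsync pulse width upper bits in bits 1-0, while the vsync low nibbles share a second byte. The sketch below restates that extraction with the shifts spelled out; the function and parameter names are placeholders for the pt-> fields, not code from this patch.

/* Sketch of the sync-field extraction, shifts written out explicitly. */
static void example_decode_sync(unsigned hi, unsigned h_off_lo,
				unsigned h_pw_lo, unsigned v_lo,
				unsigned *h_off, unsigned *h_pw,
				unsigned *v_off, unsigned *v_pw)
{
	*h_off = ((hi >> 6) & 0x3) << 8 | h_off_lo;	/* hi bits 7-6 */
	*h_pw  = ((hi >> 4) & 0x3) << 8 | h_pw_lo;	/* hi bits 5-4 */
	*v_off = ((hi >> 2) & 0x3) << 4 | (v_lo >> 4);	/* hi bits 3-2 */
	*v_pw  = (hi & 0x3) << 4 | (v_lo & 0xf);	/* hi bits 1-0 */
}
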
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 51c5a050aa73..30d6b99fb302 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_dp.o \
+ intel_dp_i2c.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index e747ac42fe3a..288fc50627e2 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -37,7 +37,7 @@ struct intel_dvo_device {
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
- struct intel_i2c_chan *i2c_bus;
+ struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
@@ -52,7 +52,7 @@ struct intel_dvo_dev_ops {
* Returns NULL if the device does not exist.
*/
bool (*init)(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus);
+ struct i2c_adapter *i2cbus);
/*
* Called to allow the output a chance to create properties after the
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 03d4b4973b02..621815b531db 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
uint8_t val;
@@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
@@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
- val, i2cbus->adapter.name,i2cbus->slave_addr);
+ val, i2cbus->adapter.name,dvo->slave_addr);
goto fail;
}
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d2fd95dbd034..a9b896289680 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid)
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
static bool ch7xxx_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
@@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (ch7xxx == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = ch7xxx;
ch7xxx->quiet = true;
@@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (device != CH7xxx_DID) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0c8d375e8e37..aa176f9921fe 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 0,
},
@@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
@@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (!priv->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
@@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
if (!priv->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
/** Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
uint16_t temp;
@@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
priv->quiet = true;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 033a4bb070b2..e1c1f7341e5c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -76,19 +76,20 @@ struct sil164_priv {
static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!sil->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!sil->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
/* Silicon Image 164 driver for chip on i2c bus */
static bool sil164_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the SIL164 chip on the specified i2c bus */
struct sil164_priv *sil;
@@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (sil == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = sil;
sil->quiet = true;
@@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (ch != (SIL164_VID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
@@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (ch != (SIL164_DID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 207fda806ebf..9ecc907384ec 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -101,19 +101,20 @@ struct tfp410_priv {
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!tfp->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!tfp->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
/* Ti TFP410 driver for chip on i2c bus */
static bool tfp410_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the tfp410 chip on the specified i2c bus */
struct tfp410_priv *tfp;
@@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
if (tfp == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = tfp;
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
tfp->quiet = false;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f112c769d533..8c4783180bf6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -846,7 +846,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
return 0;
}
- printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
+ DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
@@ -885,8 +885,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
* some RAM for the framebuffer at early boot. This code figures out
* how much was set aside so we can use it for our own purposes.
*/
-static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
- unsigned long *preallocated_size)
+static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
+ uint32_t *preallocated_size)
{
struct pci_dev *bridge_dev;
u16 tmp = 0;
@@ -984,10 +984,11 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
return 0;
}
-static int i915_load_modeset_init(struct drm_device *dev)
+static int i915_load_modeset_init(struct drm_device *dev,
+ unsigned long prealloc_size,
+ unsigned long agp_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long agp_size, prealloc_size;
int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
@@ -1002,10 +1003,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (IS_I965G(dev) || IS_G33(dev))
dev_priv->cursor_needs_physical = false;
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
- if (ret)
- goto out;
-
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
@@ -1082,6 +1079,44 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
+static void i915_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ if (!IS_IGD(dev))
+ return;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1098,6 +1133,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ uint32_t agp_size, prealloc_size;
/* i915 has 4 more counters */
dev->counters += 4;
@@ -1146,9 +1182,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
"performance may suffer.\n");
}
+ ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+ if (ret)
+ goto out_iomapfree;
+
/* enable GEM by default */
dev_priv->has_gem = 1;
+ if (prealloc_size > agp_size * 3 / 4) {
+ DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
+ "memory stolen.\n",
+ prealloc_size / 1024, agp_size / 1024);
+ DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
+ "updating the BIOS to fix).\n");
+ dev_priv->has_gem = 0;
+ }
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || IS_IGDNG(dev)) {
@@ -1165,6 +1214,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_iomapfree;
}
+ i915_get_mem_freq(dev);
+
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1180,6 +1231,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
pci_enable_msi(dev->pdev);
spin_lock_init(&dev_priv->user_irq_lock);
+ spin_lock_init(&dev_priv->error_lock);
dev_priv->user_irq_refcount = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1190,7 +1242,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev);
+ ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_rmmap;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98560e1e899a..fc4b68aa2d05 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,6 +35,7 @@
#include "drm_pciids.h"
#include <linux/console.h>
+#include "drm_crtc_helper.h"
static unsigned int i915_modeset = -1;
module_param_named(modeset, i915_modeset, int, 0400);
@@ -57,8 +58,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev || !dev_priv) {
- printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
- printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
+ DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
@@ -67,8 +68,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_save_state(dev->pdev);
- i915_save_state(dev);
-
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (i915_gem_idle(dev))
@@ -77,6 +76,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
drm_irq_uninstall(dev);
}
+ i915_save_state(dev);
+
intel_opregion_free(dev, 1);
if (state.event == PM_EVENT_SUSPEND) {
@@ -115,6 +116,10 @@ static int i915_resume(struct drm_device *dev)
drm_irq_install(dev);
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Resume the modeset for every activated CRTC */
+ drm_helper_resume_force_mode(dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a84f04e8439..d08752875885 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -133,6 +133,22 @@ struct sdvo_device_mapping {
u8 initialized;
};
+struct drm_i915_error_state {
+ u32 eir;
+ u32 pgtbl_er;
+ u32 pipeastat;
+ u32 pipebstat;
+ u32 ipeir;
+ u32 ipehr;
+ u32 instdone;
+ u32 acthd;
+ u32 instpm;
+ u32 instps;
+ u32 instdone1;
+ u32 seqno;
+ struct timeval time;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -209,6 +225,11 @@ typedef struct drm_i915_private {
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+ unsigned int fsb_freq, mem_freq;
+
+ spinlock_t error_lock;
+ struct drm_i915_error_state *first_error;
+
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
@@ -306,6 +327,17 @@ typedef struct drm_i915_private {
u32 saveCURBPOS;
u32 saveCURBBASE;
u32 saveCURSIZE;
+ u32 saveDP_B;
+ u32 saveDP_C;
+ u32 saveDP_D;
+ u32 savePIPEA_GMCH_DATA_M;
+ u32 savePIPEB_GMCH_DATA_M;
+ u32 savePIPEA_GMCH_DATA_N;
+ u32 savePIPEB_GMCH_DATA_N;
+ u32 savePIPEA_DP_LINK_M;
+ u32 savePIPEB_DP_LINK_M;
+ u32 savePIPEA_DP_LINK_N;
+ u32 savePIPEB_DP_LINK_N;
struct {
struct drm_mm gtt_space;
@@ -457,9 +489,6 @@ struct drm_i915_gem_object {
*/
int fence_reg;
- /** Boolean whether this object has a valid gtt offset. */
- int gtt_bound;
-
/** How many users have pinned this object in GTT space */
int pin_count;
@@ -644,6 +673,7 @@ void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
+void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
@@ -857,7 +887,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd2b8bdffe3f..5bf420378b6d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1006,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+ DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
@@ -1050,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %d)\n",
+ DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
obj_priv = obj->driver_private;
@@ -1252,6 +1252,31 @@ out_free_list:
return ret;
}
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (dev->dev_mapping)
+ unmap_mapping_range(dev->dev_mapping,
+ obj_priv->mmap_offset, obj->size, 1);
+}
+
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
@@ -1861,7 +1886,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- loff_t offset;
int ret = 0;
#if WATCH_BUF
@@ -1898,9 +1922,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
/* blow away mappings if mapped through GTT */
- offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
- if (dev->dev_mapping)
- unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+ i915_gem_release_mmap(obj);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
@@ -2222,7 +2244,6 @@ try_again:
/* None available, try to steal one or wait for a user to finish */
if (i == dev_priv->num_fence_regs) {
uint32_t seqno = dev_priv->mm.next_gem_seqno;
- loff_t offset;
if (avail == 0)
return -ENOSPC;
@@ -2274,10 +2295,7 @@ try_again:
* Zap this virtual mapping so we can set up a fence again
* for this object next time we need it.
*/
- offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
- if (dev->dev_mapping)
- unmap_mapping_range(dev->dev_mapping, offset,
- reg->obj->size, 1);
+ i915_gem_release_mmap(reg->obj);
old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
@@ -2423,7 +2441,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
#if WATCH_BUF
- DRM_INFO("Binding object of size %d at 0x%08x\n",
+ DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
ret = i915_gem_object_get_pages(obj);
@@ -4227,6 +4245,7 @@ i915_gem_lastclose(struct drm_device *dev)
void
i915_gem_load(struct drm_device *dev)
{
+ int i;
drm_i915_private_t *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4246,6 +4265,18 @@ i915_gem_load(struct drm_device *dev)
else
dev_priv->num_fence_regs = 8;
+ /* Initialize fence registers to zero */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+ } else {
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ }
+
i915_gem_detect_bit_6_swizzle(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8d0b943e2c5a..e602614bd3f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->page_list[page],
+ i915_gem_dump_page(obj_priv->pages[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
page * PAGE_SIZE,
@@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
uint32_t *backing_map = NULL;
int bad_count = 0;
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+ backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 28146e405e87..9a44bfcb8139 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -75,11 +75,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
lock = &dev_priv->mm.active_list_lock;
- spin_lock(lock);
head = &dev_priv->mm.active_list;
break;
case INACTIVE_LIST:
- seq_printf(m, "Inctive:\n");
+ seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
case FLUSHING_LIST:
@@ -91,6 +90,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+ if (lock)
+ spin_lock(lock);
list_for_each_entry(obj_priv, head, list)
{
struct drm_gem_object *obj = obj_priv->obj;
@@ -104,7 +105,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg);
+ seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
+ if (obj_priv->gtt_space != NULL)
+ seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
+
seq_printf(m, "\n");
}
@@ -323,6 +327,39 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_error_state(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ if (!dev_priv->first_error) {
+ seq_printf(m, "no error state collected\n");
+ goto out;
+ }
+
+ error = dev_priv->first_error;
+
+ seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
+ if (IS_I965G(dev)) {
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
+ seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ }
+
+out:
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+ return 0;
+}
static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
@@ -336,6 +373,7 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
+ {"i915_error_state", i915_error_state, 0},
};
#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 5c1ceec49f5b..a2d527b22ec4 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
if (mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
ret = 0;
goto out_put;
}
+#endif
/* Get some space for it */
ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
@@ -519,6 +521,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
goto err;
}
+ /* If we've changed tiling, GTT-mappings of the object
+ * need to re-fault to ensure that the correct fence register
+ * setup is in place.
+ */
+ i915_gem_release_mmap(obj);
+
obj_priv->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b86b7b7130c6..7ba23a69a0c0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,7 @@
*
*/
+#include <linux/sysrq.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -41,9 +42,10 @@
* we leave them always unmasked in IMR and then control enabling them through
* PIPESTAT alone.
*/
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -232,7 +234,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
hotplug_work);
struct drm_device *dev = dev_priv->dev;
-
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->hot_plug)
+ (*intel_output->hot_plug) (intel_output);
+ }
+ }
/* Just fire off a uevent and let userspace tell us what to do */
drm_sysfs_hotplug_event(dev);
}
@@ -278,6 +290,47 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
return ret;
}
+static void i915_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ if (dev_priv->first_error)
+ goto out;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error) {
+ DRM_DEBUG("out ot memory, not capturing error state\n");
+ goto out;
+ }
+
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->pipeastat = I915_READ(PIPEASTAT);
+ error->pipebstat = I915_READ(PIPEBSTAT);
+ error->instpm = I915_READ(INSTPM);
+ if (!IS_I965G(dev)) {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+ error->acthd = I915_READ(ACTHD);
+ } else {
+ error->ipeir = I915_READ(IPEIR_I965);
+ error->ipehr = I915_READ(IPEHR_I965);
+ error->instdone = I915_READ(INSTDONE_I965);
+ error->instps = I915_READ(INSTPS);
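A quick arithmetic check of these fixed dividers, following the m/p/dot formulas in the function above (illustrative only):

/* Illustrative arithmetic, not part of the patch:
 * target < 200000:  m = 5*(23+2) + (8+2) = 135,  p = 2*10 = 20
 *                   dot = 96000 * 135 / (2+2) / 20 = 162000 kHz = 162 MHz
 * target >= 200000: m = 5*(14+2) + (2+2) = 84,   p = 1*10 = 10
 *                   dot = 96000 * 84 / (1+2) / 10 = 268800 kHz, i.e. ~270 MHz
 */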
+ error->instdone1 = I915_READ(INSTDONE1);
+ error->acthd = I915_READ(ACTHD_I965);
+ }
+
+ dev_priv->first_error = error;
+
+out:
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -323,11 +376,15 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* Clear the PIPE(A|B)STAT regs before the IIR
*/
if (pipea_stats & 0x8000ffff) {
+ if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG("pipe a underrun\n");
I915_WRITE(PIPEASTAT, pipea_stats);
irq_received = 1;
}
if (pipeb_stats & 0x8000ffff) {
+ if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG("pipe b underrun\n");
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
@@ -352,6 +409,80 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_READ(PORT_HOTPLUG_STAT);
}
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) {
+ u32 eir = I915_READ(EIR);
+
+ i915_capture_error_state(dev);
+
+ printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
+ eir);
+ if (eir & I915_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ printk(KERN_ERR "page table error\n");
+ printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
+ pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ (void)I915_READ(PGTBL_ER);
+ }
+ if (eir & I915_ERROR_MEMORY_REFRESH) {
+ printk(KERN_ERR "memory refresh error\n");
+ printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
+ pipea_stats);
+ printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
+ pipeb_stats);
+ /* pipestat has already been acked */
+ }
+ if (eir & I915_ERROR_INSTRUCTION) {
+ printk(KERN_ERR "instruction error\n");
+ printk(KERN_ERR " INSTPM: 0x%08x\n",
+ I915_READ(INSTPM));
+ if (!IS_I965G(dev)) {
+ u32 ipeir = I915_READ(IPEIR);
+
+ printk(KERN_ERR " IPEIR: 0x%08x\n",
+ I915_READ(IPEIR));
+ printk(KERN_ERR " IPEHR: 0x%08x\n",
+ I915_READ(IPEHR));
+ printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE));
+ printk(KERN_ERR " ACTHD: 0x%08x\n",
+ I915_READ(ACTHD));
+ I915_WRITE(IPEIR, ipeir);
+ (void)I915_READ(IPEIR);
+ } else {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ printk(KERN_ERR " IPEIR: 0x%08x\n",
+ I915_READ(IPEIR_I965));
+ printk(KERN_ERR " IPEHR: 0x%08x\n",
+ I915_READ(IPEHR_I965));
+ printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE_I965));
+ printk(KERN_ERR " INSTPS: 0x%08x\n",
+ I915_READ(INSTPS));
+ printk(KERN_ERR " INSTDONE1: 0x%08x\n",
+ I915_READ(INSTDONE1));
+ printk(KERN_ERR " ACTHD: 0x%08x\n",
+ I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ (void)I915_READ(IPEIR_I965);
+ }
+ }
+
+ I915_WRITE(EIR, eir);
+ (void)I915_READ(EIR);
+ eir = I915_READ(EIR);
+ if (eir) {
+ /*
+ * some errors might have become stuck,
+ * mask them.
+ */
+ DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ I915_WRITE(EMR, I915_READ(EMR) | eir);
+ I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ }
+ }
+
I915_WRITE(IIR, iir);
new_iir = I915_READ(IIR); /* Flush posted writes */
@@ -722,6 +853,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+ u32 error_mask;
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
@@ -758,6 +890,21 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
}
+ /*
+ * Enable some error detection, note the instruction error mask
+ * bit is reserved, so we leave it masked.
+ */
+ if (IS_G4X(dev)) {
+ error_mask = ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ } else {
+ error_mask = ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+ }
+ I915_WRITE(EMR, error_mask);
+
/* Disable pipe interrupt enables, clear pending pipe status */
I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index dc425e74a268..e4b4e8898e39 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -419,7 +419,7 @@ void intel_opregion_free(struct drm_device *dev, int suspend)
return;
if (!suspend)
- acpi_video_exit();
+ acpi_video_unregister();
opregion->acpi->drdy = 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f6237a0b1133..6c0858484094 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -206,6 +206,7 @@
/*
* Instruction and interrupt control regs
*/
+#define PGTBL_ER 0x02024
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
#define PRB0_START 0x02038
@@ -226,11 +227,18 @@
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#define IPEIR_I965 0x02064
+#define IPEHR_I965 0x02068
+#define INSTDONE_I965 0x0206c
+#define INSTPS 0x02070 /* 965+ only */
+#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define IPEIR 0x02088
+#define IPEHR 0x0208c
+#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
#define SCPD0 0x0209c /* 915+ only */
@@ -258,10 +266,22 @@
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
+#define GM45_ERROR_PAGE_TABLE (1<<5)
+#define GM45_ERROR_MEM_PRIV (1<<4)
+#define I915_ERROR_PAGE_TABLE (1<<4)
+#define GM45_ERROR_CP_PRIV (1<<3)
+#define I915_ERROR_MEMORY_REFRESH (1<<1)
+#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
+#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
+#define FW_BLC_SELF_EN (1<<15)
+#define MM_BURST_LENGTH 0x00700000
+#define MM_FIFO_WATERMARK 0x0001F000
+#define LM_BURST_LENGTH 0x00000700
+#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_MASK_SHIFT 16
@@ -569,6 +589,23 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
+/* Clocking configuration register */
+#define CLKCFG 0x10c00
+#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+/* Note: the two values below are guesses */
+#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+#define CLKCFG_MEM_533 (1 << 4)
+#define CLKCFG_MEM_667 (2 << 4)
+#define CLKCFG_MEM_800 (3 << 4)
+#define CLKCFG_MEM_MASK (7 << 4)
+
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
@@ -834,9 +871,25 @@
#define HORIZ_INTERP_MASK (3 << 6)
#define HORIZ_AUTO_SCALE (1 << 5)
#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_FILTER_FUZZY (0 << 24)
+#define PFIT_SCALING_AUTO (0 << 26)
+#define PFIT_SCALING_PROGRAMMED (1 << 26)
+#define PFIT_SCALING_PILLAR (2 << 26)
+#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234
#define PFIT_VERT_SCALE_MASK 0xfff00000
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* Pre-965 */
+#define PFIT_VERT_SCALE_SHIFT 20
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_SHIFT 4
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* 965+ */
+#define PFIT_VERT_SCALE_SHIFT_965 16
+#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
+#define PFIT_HORIZ_SCALE_SHIFT_965 0
+#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
+
#define PFIT_AUTO_RATIOS 0x61238
/* Backlight control */
@@ -1552,6 +1605,34 @@
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
#define DSPARB_BSTART_SHIFT 0
+#define DSPARB_BEND_SHIFT 9 /* on 855 */
+#define DSPARB_AEND_SHIFT 0
+
+#define DSPFW1 0x70034
+#define DSPFW2 0x70038
+#define DSPFW3 0x7003c
+#define IGD_SELF_REFRESH_EN (1<<30)
+
+/* FIFO watermark sizes etc */
+#define I915_FIFO_LINE_SIZE 64
+#define I830_FIFO_LINE_SIZE 32
+#define I945_FIFO_SIZE 127 /* 945 & 965 */
+#define I915_FIFO_SIZE 95
+#define I855GM_FIFO_SIZE 255
+#define I830_FIFO_SIZE 95
+#define I915_MAX_WM 0x3f
+
+#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
+#define IGD_FIFO_LINE_SIZE 64
+#define IGD_MAX_WM 0x1ff
+#define IGD_DFT_WM 0x3f
+#define IGD_DFT_HPLLOFF_WM 0
+#define IGD_GUARD_WM 10
+#define IGD_CURSOR_FIFO 64
+#define IGD_CURSOR_MAX_WM 0x3f
+#define IGD_CURSOR_DFT_WM 0
+#define IGD_CURSOR_GUARD_WM 5
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a98e2831ed31..9e1d16e5c3ea 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -222,23 +222,12 @@ static void i915_restore_vga(struct drm_device *dev)
I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
}
-int i915_save_state(struct drm_device *dev)
+static void i915_save_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-
- /* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
- dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
-
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
-
- /* Display arbitration control */
- dev_priv->saveDSPARB = I915_READ(DSPARB);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -294,7 +283,122 @@ int i915_save_state(struct drm_device *dev)
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+ return;
+}
+static void i915_restore_modeset_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ DRM_UDELAY(150);
+ }
+ I915_WRITE(FPA0, dev_priv->saveFPA0);
+ I915_WRITE(FPA1, dev_priv->saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+ DRM_UDELAY(150);
+ if (IS_I965G(dev))
+ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+ /* Restore plane info */
+ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ }
+
+ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ DRM_UDELAY(150);
+ }
+ I915_WRITE(FPB0, dev_priv->saveFPB0);
+ I915_WRITE(FPB1, dev_priv->saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+ DRM_UDELAY(150);
+ if (IS_I965G(dev))
+ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+ /* Restore plane info */
+ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ return;
+}
+int i915_save_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+ /* Render Standby */
+ if (IS_I965G(dev) && IS_MOBILE(dev))
+ dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+
+ /* Hardware status page */
+ dev_priv->saveHWS = I915_READ(HWS_PGA);
+
+ /* Display arbitration control */
+ dev_priv->saveDSPARB = I915_READ(DSPARB);
+
+ /* This is only meaningful in non-KMS mode */
+ /* Don't save them in KMS mode */
+ i915_save_modeset_reg(dev);
/* Cursor state */
dev_priv->saveCURACNTR = I915_READ(CURACNTR);
dev_priv->saveCURAPOS = I915_READ(CURAPOS);
@@ -322,6 +426,20 @@ int i915_save_state(struct drm_device *dev)
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->saveDP_B = I915_READ(DP_B);
+ dev_priv->saveDP_C = I915_READ(DP_C);
+ dev_priv->saveDP_D = I915_READ(DP_D);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+ }
/* FIXME: save TV & SDVO state */
/* FBC state */
@@ -404,92 +522,21 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
}
-
- /* Pipe & plane A info */
- /* Prime the clock */
- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
- I915_WRITE(FPA0, dev_priv->saveFPA0);
- I915_WRITE(FPA1, dev_priv->saveFPA1);
- /* Actually enable it */
- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
- DRM_UDELAY(150);
- if (IS_I965G(dev))
- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- DRM_UDELAY(150);
-
- /* Restore mode */
- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
- /* Restore plane info */
- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
- if (IS_I965G(dev)) {
- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
- }
-
- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
-
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- DRM_UDELAY(150);
- }
- I915_WRITE(FPB0, dev_priv->saveFPB0);
- I915_WRITE(FPB1, dev_priv->saveFPB1);
- /* Actually enable it */
- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
- DRM_UDELAY(150);
- if (IS_I965G(dev))
- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- DRM_UDELAY(150);
-
- /* Restore mode */
- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
-
- /* Restore plane info */
- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
- if (IS_I965G(dev)) {
- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
- }
-
- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
-
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
-
+ /* This is only meaningful in non-KMS mode */
+ /* Don't restore them in KMS mode */
+ i915_restore_modeset_reg(dev);
/* Cursor state */
I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
@@ -518,6 +565,12 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->saveDP_B);
+ I915_WRITE(DP_C, dev_priv->saveDP_C);
+ I915_WRITE(DP_D, dev_priv->saveDP_D);
+ }
/* FIXME: restore TV & SDVO state */
/* FBC info */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index cdd126d068a7..7cc447191028 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -97,11 +97,14 @@ static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
+ struct drm_device *dev = dev_priv->dev;
struct bdb_lvds_options *lvds_options;
struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
struct bdb_lvds_lfp_data_entry *entry;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int lfp_data_size;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
@@ -119,10 +122,25 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data)
return;
+ lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+ if (!lvds_lfp_data_ptrs)
+ return;
+
dev_priv->lvds_vbt = 1;
- entry = &lvds_lfp_data->data[lvds_options->panel_type];
- dvo_timing = &entry->dvo_timing;
+ lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
+ lvds_options->panel_type));
+
+ /* On IGDNG mobile, the LVDS data block omits the panel fitting
+ registers, so subtract 2 dwords from the dvo_timing offset */
+ if (IS_IGDNG(dev))
+ dvo_timing = (struct lvds_dvo_timing *)
+ ((u8 *)&entry->dvo_timing - 8);
+ else
+ dvo_timing = &entry->dvo_timing;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
@@ -185,10 +203,12 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_use_ssc = general->enable_ssc;
if (dev_priv->lvds_use_ssc) {
- if (IS_I855(dev_priv->dev))
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
- else
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+ if (IS_I85X(dev_priv->dev))
+ dev_priv->lvds_ssc_freq =
+ general->ssc_freq ? 66 : 48;
+ else
+ dev_priv->lvds_ssc_freq =
+ general->ssc_freq ? 100 : 96;
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6de97fc66029..d6a1a6e5539a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -46,7 +46,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
temp = I915_READ(reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
- temp |= ADPA_DAC_ENABLE;
+ temp &= ~ADPA_DAC_ENABLE;
switch(mode) {
case DRM_MODE_DPMS_ON:
@@ -428,8 +428,34 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
+ int ret;
struct intel_output *intel_output = to_intel_output(connector);
- return intel_ddc_get_modes(intel_output);
+ struct i2c_adapter *ddcbus;
+ struct drm_device *dev = connector->dev;
+
+
+ ret = intel_ddc_get_modes(intel_output);
+ if (ret || !IS_G4X(dev))
+ goto end;
+
+ ddcbus = intel_output->ddc_bus;
+ /* Try to probe digital port for output in DVI-I -> VGA mode. */
+ intel_output->ddc_bus =
+ intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+
+ if (!intel_output->ddc_bus) {
+ intel_output->ddc_bus = ddcbus;
+ dev_printk(KERN_ERR, &connector->dev->pdev->dev,
+ "DDC bus registration failed for CRTDDC_D.\n");
+ goto end;
+ }
+ /* Try to get modes by GPIOD port */
+ ret = intel_ddc_get_modes(intel_output);
+ intel_i2c_destroy(ddcbus);
+
+end:
+ return ret;
+
}
static int intel_crt_set_property(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3e1c78162119..508838ee31e0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -25,14 +25,17 @@
*/
#include <linux/i2c.h>
+#include <linux/kernel.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_dp.h"
#include "drm_crtc_helper.h"
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+static void intel_update_watermarks(struct drm_device *dev);
typedef struct {
/* given values */
@@ -127,19 +130,6 @@ struct intel_limit {
#define I9XX_P2_LVDS_FAST 7
#define I9XX_P2_LVDS_SLOW_LIMIT 112000
-#define INTEL_LIMIT_I8XX_DVO_DAC 0
-#define INTEL_LIMIT_I8XX_LVDS 1
-#define INTEL_LIMIT_I9XX_SDVO_DAC 2
-#define INTEL_LIMIT_I9XX_LVDS 3
-#define INTEL_LIMIT_G4X_SDVO 4
-#define INTEL_LIMIT_G4X_HDMI_DAC 5
-#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
-#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
-#define INTEL_LIMIT_IGD_SDVO_DAC 8
-#define INTEL_LIMIT_IGD_LVDS 9
-#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
-#define INTEL_LIMIT_IGDNG_LVDS 11
-
/*The parameter is for SDVO on G4x platform*/
#define G4X_DOT_SDVO_MIN 25000
#define G4X_DOT_SDVO_MAX 270000
@@ -218,6 +208,25 @@ struct intel_limit {
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
+/*The parameter is for DISPLAY PORT on G4x platform*/
+#define G4X_DOT_DISPLAY_PORT_MIN 161670
+#define G4X_DOT_DISPLAY_PORT_MAX 227000
+#define G4X_N_DISPLAY_PORT_MIN 1
+#define G4X_N_DISPLAY_PORT_MAX 2
+#define G4X_M_DISPLAY_PORT_MIN 97
+#define G4X_M_DISPLAY_PORT_MAX 108
+#define G4X_M1_DISPLAY_PORT_MIN 0x10
+#define G4X_M1_DISPLAY_PORT_MAX 0x12
+#define G4X_M2_DISPLAY_PORT_MIN 0x05
+#define G4X_M2_DISPLAY_PORT_MAX 0x06
+#define G4X_P_DISPLAY_PORT_MIN 10
+#define G4X_P_DISPLAY_PORT_MAX 20
+#define G4X_P1_DISPLAY_PORT_MIN 1
+#define G4X_P1_DISPLAY_PORT_MAX 2
+#define G4X_P2_DISPLAY_PORT_SLOW 10
+#define G4X_P2_DISPLAY_PORT_FAST 10
+#define G4X_P2_DISPLAY_PORT_LIMIT 0
+
/* IGDNG */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
@@ -256,8 +265,11 @@ static bool
intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static const intel_limit_t intel_limits[] = {
- { /* INTEL_LIMIT_I8XX_DVO_DAC */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -269,8 +281,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I8XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -282,8 +295,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -295,8 +309,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -311,9 +326,10 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
+};
+
/* below parameter and function is for G4X Chipset Family*/
- { /* INTEL_LIMIT_G4X_SDVO */
+static const intel_limit_t intel_limits_g4x_sdvo = {
.dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
@@ -327,8 +343,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_HDMI_DAC */
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
.dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
@@ -342,8 +359,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -365,8 +383,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -388,8 +407,32 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_SDVO */
+};
+
+static const intel_limit_t intel_limits_g4x_display_port = {
+ .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
+ .max = G4X_DOT_DISPLAY_PORT_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_DISPLAY_PORT_MIN,
+ .max = G4X_N_DISPLAY_PORT_MAX },
+ .m = { .min = G4X_M_DISPLAY_PORT_MIN,
+ .max = G4X_M_DISPLAY_PORT_MAX },
+ .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
+ .max = G4X_M1_DISPLAY_PORT_MAX },
+ .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
+ .max = G4X_M2_DISPLAY_PORT_MAX },
+ .p = { .min = G4X_P_DISPLAY_PORT_MIN,
+ .max = G4X_P_DISPLAY_PORT_MAX },
+ .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
+ .max = G4X_P1_DISPLAY_PORT_MAX},
+ .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
+ .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
+ .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
+ .find_pll = intel_find_pll_g4x_dp,
+};
+
+static const intel_limit_t intel_limits_igd_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -401,8 +444,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_LVDS */
+};
+
+static const intel_limit_t intel_limits_igd_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -415,8 +459,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_igdng_sdvo = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -429,8 +474,9 @@ static const intel_limit_t intel_limits[] = {
.p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
.p2_fast = IGDNG_P2_SDVO_DAC_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_LVDS */
+};
+
+static const intel_limit_t intel_limits_igdng_lvds = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -443,16 +489,15 @@ static const intel_limit_t intel_limits[] = {
.p2_slow = IGDNG_P2_LVDS_SLOW,
.p2_fast = IGDNG_P2_LVDS_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
};
static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
{
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
+ limit = &intel_limits_igdng_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
+ limit = &intel_limits_igdng_sdvo;
return limit;
}
@@ -467,19 +512,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
/* LVDS with dual channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_dual_channel_lvds;
else
/* LVDS with single channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+ limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+ limit = &intel_limits_g4x_sdvo;
+ } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
return limit;
}
@@ -495,19 +540,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
limit = intel_g4x_limit(crtc);
} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ limit = &intel_limits_i9xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
} else if (IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+ limit = &intel_limits_igd_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
+ limit = &intel_limits_igd_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+ limit = &intel_limits_i8xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
+ limit = &intel_limits_i8xx_dvo;
}
return limit;
}
@@ -764,6 +809,32 @@ out:
return found;
}
+/* DisplayPort has only two frequencies, 162MHz and 270MHz */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+{
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 2;
+ clock.m1 = 23;
+ clock.m2 = 8;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 14;
+ clock.m2 = 2;
+ }
+ clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+ clock.p = (clock.p1 * clock.p2);
+ clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
void
intel_wait_for_vblank(struct drm_device *dev)
{
@@ -933,7 +1004,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
@@ -1263,8 +1334,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Give the overlay scaler a chance to enable if it's on this pipe */
//intel_crtc_dpms_video(crtc, true); TODO
+ intel_update_watermarks(dev);
break;
case DRM_MODE_DPMS_OFF:
+ intel_update_watermarks(dev);
/* Give the overlay scaler a chance to disable if it's on this pipe */
//intel_crtc_dpms_video(crtc, FALSE); TODO
@@ -1443,7 +1516,6 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
return 0; /* Silence gcc warning */
}
-
/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
@@ -1502,7 +1574,7 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
temp = (u64) DATA_N * pixel_clock;
temp = div_u64(temp, link_clock);
- m_n->gmch_m = (temp * bytes_per_pixel) / nlanes;
+ m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes);
m_n->gmch_n = DATA_N;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
@@ -1513,6 +1585,420 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
}
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+/* IGD has different values for various configs */
+static struct intel_watermark_params igd_display_wm = {
+ IGD_DISPLAY_FIFO,
+ IGD_MAX_WM,
+ IGD_DFT_WM,
+ IGD_GUARD_WM,
+ IGD_FIFO_LINE_SIZE
+};
+static struct intel_watermark_params igd_display_hplloff_wm = {
+ IGD_DISPLAY_FIFO,
+ IGD_MAX_WM,
+ IGD_DFT_HPLLOFF_WM,
+ IGD_GUARD_WM,
+ IGD_FIFO_LINE_SIZE
+};
+static struct intel_watermark_params igd_cursor_wm = {
+ IGD_CURSOR_FIFO,
+ IGD_CURSOR_MAX_WM,
+ IGD_CURSOR_DFT_WM,
+ IGD_CURSOR_GUARD_WM,
+ IGD_FIFO_LINE_SIZE,
+};
+static struct intel_watermark_params igd_cursor_hplloff_wm = {
+ IGD_CURSOR_FIFO,
+ IGD_CURSOR_MAX_WM,
+ IGD_CURSOR_DFT_WM,
+ IGD_CURSOR_GUARD_WM,
+ IGD_FIFO_LINE_SIZE
+};
+static struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 0,
+ I915_FIFO_LINE_SIZE
+};
+static struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 0,
+ I915_FIFO_LINE_SIZE
+};
+static struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 0,
+ I830_FIFO_LINE_SIZE
+};
+static struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 0,
+ I830_FIFO_LINE_SIZE
+};
+
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ struct intel_watermark_params *wm,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ unsigned long bytes_required, wm_size;
+
+ bytes_required = (clock_in_khz * pixel_size * latency_ns) / 1000000;
+ bytes_required /= wm->cacheline_size;
+ wm_size = wm->fifo_size - bytes_required - wm->guard_size;
+
+ if (wm_size > wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size == 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
+
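To make the watermark formula concrete, a worked example with assumed values (a ~154 MHz dot clock, 32 bpp, the default 5000 ns latency, and a 95-entry FIFO with 64-byte lines); the numbers are illustrative, not taken from the patch:

/* Worked example with assumed values:
 *   clock = 154000 kHz, pixel_size = 4, latency_ns = 5000,
 *   fifo_size = 95, cacheline_size = 64, guard_size = 0
 *
 *   bytes_required = 154000 * 4 * 5000 / 1000000 = 3080 bytes
 *                  = 3080 / 64 cachelines        = 48 FIFO entries
 *   wm_size        = 95 - 48 - 0                 = 47
 * which is below max_wm (0x3f = 63), so no clamping occurs.
 */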
+struct cxsr_latency {
+ int is_desktop;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
+static struct cxsr_latency cxsr_latency_table[] = {
+ {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+
+ {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+
+ {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+
+ {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+
+ {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+
+ {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+};
+
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
+ int mem)
+{
+ int i;
+ struct cxsr_latency *latency;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ break;
+ }
+ if (i >= ARRAY_SIZE(cxsr_latency_table)) {
+ DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+ return NULL;
+ }
+ return latency;
+}
+
+static void igd_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+
+ /* deactivate cxsr */
+ reg = I915_READ(DSPFW3);
+ reg &= ~(IGD_SELF_REFRESH_EN);
+ I915_WRITE(DSPFW3, reg);
+ DRM_INFO("Big FIFO is disabled\n");
+}
+
+static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ unsigned long wm;
+ struct cxsr_latency *latency;
+
+ latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+ igd_disable_cxsr(dev);
+ return;
+ }
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size,
+ latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= 0x7fffff;
+ reg |= wm << 23;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size,
+ latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~(0x3f << 24);
+ reg |= (wm & 0x3f) << 24;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &igd_display_hplloff_wm,
+ latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
+ reg = I915_READ(DSPFW3);
+ reg &= 0xfffffe00;
+ reg |= wm & 0x1ff;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size,
+ latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~(0x3f << 16);
+ reg |= (wm & 0x3f) << 16;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ reg = I915_READ(DSPFW3);
+ reg |= IGD_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
+
+ DRM_INFO("Big FIFO is enabled\n");
+
+ return;
+}
+
+static const int latency_ns = 5000; /* default for non-igd platforms */
+
+static void i965_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+}
+
+static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t fwater_lo = I915_READ(FW_BLC) & MM_FIFO_WATERMARK;
+ uint32_t fwater_hi = I915_READ(FW_BLC2) & LM_FIFO_WATERMARK;
+ int bsize, asize, cwm, bwm = 1, awm = 1, srwm = 1;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int planea_entries, planeb_entries;
+ struct intel_watermark_params *wm_params;
+ unsigned long line_time_us;
+ int sr_clock, sr_entries = 0;
+
+ if (IS_I965GM(dev) || IS_I945GM(dev))
+ wm_params = &i945_wm_info;
+ else if (IS_I9XX(dev))
+ wm_params = &i915_wm_info;
+ else
+ wm_params = &i855_wm_info;
+
+ planea_entries = intel_calculate_wm(planea_clock, wm_params,
+ pixel_size, latency_ns);
+ planeb_entries = intel_calculate_wm(planeb_clock, wm_params,
+ pixel_size, latency_ns);
+
+ DRM_DEBUG("FIFO entries - A: %d, B: %d\n", planea_entries,
+ planeb_entries);
+
+ if (IS_I9XX(dev)) {
+ asize = dsparb & 0x7f;
+ bsize = (dsparb >> DSPARB_CSTART_SHIFT) & 0x7f;
+ } else {
+ asize = dsparb & 0x1ff;
+ bsize = (dsparb >> DSPARB_BEND_SHIFT) & 0x1ff;
+ }
+ DRM_DEBUG("FIFO size - A: %d, B: %d\n", asize, bsize);
+
+ /* Two extra entries for padding */
+ awm = asize - (planea_entries + 2);
+ bwm = bsize - (planeb_entries + 2);
+
+ /* Sanity check against potentially bad FIFO allocations */
+ if (awm <= 0) {
+ /* pipe is on but has too few FIFO entries */
+ if (planea_entries != 0)
+ DRM_DEBUG("plane A needs more FIFO entries\n");
+ awm = 1;
+ }
+ if (bwm <= 0) {
+ if (planeb_entries != 0)
+ DRM_DEBUG("plane B needs more FIFO entries\n");
+ bwm = 1;
+ }
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Calc sr entries for one pipe configs */
+ if (!planea_clock || !planeb_clock) {
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = (sr_hdisplay * 1000) / sr_clock;
+ sr_entries = (((latency_ns / line_time_us) + 1) * pixel_size *
+ sr_hdisplay) / 1000;
+ sr_entries = roundup(sr_entries / wm_params->cacheline_size, 1);
+ if (sr_entries < wm_params->fifo_size)
+ srwm = wm_params->fifo_size - sr_entries;
+ }
+
+ DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ awm, bwm, cwm, srwm);
+
+ fwater_lo = fwater_lo | ((bwm & 0x3f) << 16) | (awm & 0x3f);
+ fwater_hi = fwater_hi | (cwm & 0x1f);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+ if (IS_I9XX(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+}
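+
+/*
+ * Worked example for the self-refresh path above (editor's illustration,
+ * mode numbers assumed): with one pipe at a 65 MHz dot clock, 1024 active
+ * pixels per line and 4 bytes per pixel, line_time_us = 1024 * 1000 / 65000
+ * = 15, so sr_entries = (5000 / 15 + 1) * 4 * 1024 / 1000 = 1368 bytes,
+ * or 21 entries assuming a 64-byte cacheline; srwm is then fifo_size - 21.
+ */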
+
+static void i830_update_wm(struct drm_device *dev, int planea_clock,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ uint32_t fwater_lo = I915_READ(FW_BLC) & MM_FIFO_WATERMARK;
+ unsigned int asize, awm;
+ int planea_entries;
+
+ planea_entries = intel_calculate_wm(planea_clock, &i830_wm_info,
+ pixel_size, latency_ns);
+
+ asize = dsparb & 0x7f;
+
+ awm = asize - planea_entries;
+
+ fwater_lo = fwater_lo | awm;
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+static void intel_update_watermarks(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ int sr_hdisplay = 0;
+ unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
+ int enabled = 0, pixel_size = 0;
+
+ if (DSPARB_HWCONTROL(dev))
+ return;
+
+ /* Get the clock config from both planes */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled) {
+ enabled++;
+ if (intel_crtc->plane == 0) {
+ DRM_DEBUG("plane A (pipe %d) clock: %d\n",
+ intel_crtc->pipe, crtc->mode.clock);
+ planea_clock = crtc->mode.clock;
+ } else {
+ DRM_DEBUG("plane B (pipe %d) clock: %d\n",
+ intel_crtc->pipe, crtc->mode.clock);
+ planeb_clock = crtc->mode.clock;
+ }
+ sr_hdisplay = crtc->mode.hdisplay;
+ sr_clock = crtc->mode.clock;
+ if (crtc->fb)
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+ else
+ pixel_size = 4; /* by default */
+ }
+ }
+
+ if (enabled <= 0)
+ return;
+
+ /* Single pipe configs can enable self refresh */
+ if (enabled == 1 && IS_IGD(dev))
+ igd_enable_cxsr(dev, sr_clock, pixel_size);
+ else if (IS_IGD(dev))
+ igd_disable_cxsr(dev);
+
+ if (IS_I965G(dev))
+ i965_update_wm(dev);
+ else if (IS_I9XX(dev) || IS_MOBILE(dev))
+ i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
+ pixel_size);
+ else
+ i830_update_wm(dev, planea_clock, pixel_size);
+}
+
static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -1541,7 +2027,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const intel_limit_t *limit;
@@ -1585,6 +2071,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
}
num_outputs++;
@@ -1600,6 +2089,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
} else {
refclk = 48000;
}
+
/*
* Returns a set of divisors for the desired target clock with the given
@@ -1662,6 +2152,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else if (IS_IGDNG(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
+ if (is_dp)
+ dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_IGD(dev))
@@ -1809,6 +2301,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
+ if (is_dp)
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
@@ -1871,6 +2365,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
+
+ intel_update_watermarks(dev);
+
drm_vblank_post_modeset(dev, pipe);
return ret;
@@ -2359,6 +2856,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
@@ -2475,6 +2973,8 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
@@ -2487,7 +2987,11 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_C);
}
+ if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ intel_dp_init(dev, DP_D);
} else
intel_dvo_init(dev);
@@ -2530,6 +3034,11 @@ static void intel_setup_outputs(struct drm_device *dev)
(1 << 1));
clone_mask = (1 << INTEL_OUTPUT_TVOUT);
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ crtc_mask = ((1 << 0) |
+ (1 << 1));
+ clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ break;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones = intel_connector_clones(dev, clone_mask);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 000000000000..6770ae88370d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,1156 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_dp.h"
+
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp_priv {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ uint32_t save_DP;
+ uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ int dpms_mode;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[4];
+ struct intel_output *intel_output;
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+};
+
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+
+static int
+intel_dp_max_lane_count(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_lane_count = 4;
+
+ if (dp_priv->dpcd[0] >= 0x11) {
+ max_lane_count = dp_priv->dpcd[2] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ }
+ return max_lane_count;
+}
+
+static int
+intel_dp_max_link_bw(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_link_bw = dp_priv->dpcd[1];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+static int
+intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+/* I think this is a fiction */
+static int
+intel_dp_link_required(int pixel_clock)
+{
+ return pixel_clock * 3;
+}
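+
+/*
+ * Editor's interpretation (not from the original author): at 24bpp each
+ * pixel needs 3 bytes, so pixel_clock (kHz) * 3 is the payload in kB/s,
+ * which mode_valid below compares against link_clock * lane_count, the
+ * post-8b/10b capacity of the link in the same units.
+ */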
+
+static int
+intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
+ int max_lanes = intel_dp_max_lane_count(intel_output);
+
+ if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
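+
+/*
+ * For example (illustrative): pack_aux() of the two bytes {0x12, 0x34}
+ * yields 0x12340000 (most significant byte first), and unpack_aux()
+ * reverses the packing.
+ */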
+
+/* hrawclock is 1/4 the FSB frequency */
+static int
+intel_hrawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
+
+static int
+intel_dp_aux_ch(struct intel_output *intel_output,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t output_reg = dp_priv->output_reg;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t ctl;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try;
+
+ /* The AUX channel clock divider is derived from hrawclk;
+ * the channel should run at about 2MHz, so use half the
+ * hrawclk value (in MHz) as the divider
+ */
+ aux_clock_divider = intel_hrawclk(dev) / 2;
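+ /* e.g. an 800 MHz FSB gives a 200 MHz hrawclk and a divider
+ * of 100 (illustrative) */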
+ /* The DP spec requires at least 3 retries; try up to 5 to be safe */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4) {
+ uint32_t d = pack_aux(send + i, send_bytes - i);
+
+ I915_WRITE(ch_data + i, d);
+ }
+
+ ctl = (DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl, ctl);
+ (void) I915_READ(ch_ctl);
+ for (;;) {
+ udelay(100);
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ }
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl, (ctl |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR));
+ (void) I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for receive errors first; timeouts are handled below */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Timeouts occur when the device isn't connected, so they're
+ * "normal" -- don't fill the kernel log with these */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4) {
+ uint32_t d = I915_READ(ch_data + i);
+
+ unpack_aux(d, recv + i, recv_bytes - i);
+ }
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+intel_dp_aux_native_write(struct intel_output *intel_output,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
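+
+/*
+ * For example (illustrative), a one-byte native write to DPCD address
+ * 0x102 builds the header bytes { 0x80, 0x01, 0x02, 0x00 } followed by
+ * the single payload byte.
+ */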
+
+/* Write a single byte to the aux channel in native mode */
+static int
+intel_dp_aux_native_write_1(struct intel_output *intel_output,
+ uint16_t address, uint8_t byte)
+{
+ return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+intel_dp_aux_native_read(struct intel_output *intel_output,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes)
+{
+ struct intel_dp_priv *dp_priv = container_of(adapter,
+ struct intel_dp_priv,
+ adapter);
+ struct intel_output *intel_output = dp_priv->intel_output;
+
+ return intel_dp_aux_ch(intel_output,
+ send, send_bytes, recv, recv_bytes);
+}
+
+static int
+intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ DRM_ERROR("i2c_init %s\n", name);
+ dp_priv->algo.running = false;
+ dp_priv->algo.address = 0;
+ dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
+ dp_priv->adapter.owner = THIS_MODULE;
+ dp_priv->adapter.class = I2C_CLASS_DDC;
+ strncpy (dp_priv->adapter.name, name, sizeof dp_priv->adapter.name - 1);
+ dp_priv->adapter.name[sizeof dp_priv->adapter.name - 1] = '\0';
+ dp_priv->adapter.algo_data = &dp_priv->algo;
+ dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+
+ return i2c_dp_aux_add_bus(&dp_priv->adapter);
+}
+
+static bool
+intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int lane_count, clock;
+ int max_lane_count = intel_dp_max_lane_count(intel_output);
+ int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
+
+ if (intel_dp_link_required(mode->clock) <= link_avail) {
+ dp_priv->link_bw = bws[clock];
+ dp_priv->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
+ dp_priv->link_bw, dp_priv->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+ return false;
+}
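+
+/*
+ * Worked example (editor's illustration): a 148.5 MHz mode needs
+ * 148500 * 3 = 445500, which first fits at lane_count == 2 on the
+ * 2.7 Gbps rate (270000 * 2 = 540000), assuming the sink reports
+ * DP_LINK_BW_2_7, so that combination is chosen.
+ */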
+
+struct intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
+static void
+intel_dp_compute_m_n(int bytes_per_pixel,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = pixel_clock * bytes_per_pixel;
+ m_n->gmch_n = link_clock * nlanes;
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
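+
+/*
+ * For example (illustrative): a 108000 kHz pixel clock on a 4-lane link
+ * at link clock 162000 gives gmch_m/gmch_n = 324000/648000 and
+ * link_m/link_n = 108000/162000; both already fit in 24 bits, so
+ * intel_reduce_ratio() leaves them unchanged.
+ */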
+
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int lane_count = 4;
+ struct intel_dp_m_n m_n;
+
+ /*
+ * Find the lane count in the intel_output private
+ */
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = dp_priv->lane_count;
+ break;
+ }
+ }
+
+ /*
+ * Compute the GMCH and Link ratios. The '3' here is
+ * the number of bytes_per_pixel post-LUT, which we always
+ * set up for 8-bits of R/G/B, or 3 bytes total.
+ */
+ intel_dp_compute_m_n(3, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+ if (intel_crtc->pipe == 0) {
+ I915_WRITE(PIPEA_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEA_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
+ } else {
+ I915_WRITE(PIPEB_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEB_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
+ }
+}
+
+static void
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_crtc *crtc = intel_output->enc.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ dp_priv->DP = (DP_LINK_TRAIN_OFF |
+ DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0 |
+ DP_SYNC_VS_HIGH |
+ DP_SYNC_HS_HIGH);
+
+ switch (dp_priv->lane_count) {
+ case 1:
+ dp_priv->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ dp_priv->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ dp_priv->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (dp_priv->has_audio)
+ dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ dp_priv->link_configuration[0] = dp_priv->link_bw;
+ dp_priv->link_configuration[1] = dp_priv->lane_count;
+
+ /*
+ * Check for DPCD version 1.1 or newer and
+ * enable enhanced framing in that case
+ */
+ if (dp_priv->dpcd[0] >= 0x11) {
+ dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ dp_priv->DP |= DP_ENHANCED_FRAMING;
+ }
+
+ if (intel_crtc->pipe == 1)
+ dp_priv->DP |= DP_PIPEB_SELECT;
+}
+
+
+static void
+intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ } else {
+ if (!(dp_reg & DP_PORT_EN))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ }
+ dp_priv->dpms_mode = mode;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read(intel_output,
+ DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret != DP_LINK_STATUS_SIZE)
+ return false;
+ return true;
+}
+
+static uint8_t
+intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static void
+intel_dp_save(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ dp_priv->save_DP = I915_READ(dp_priv->output_reg);
+ intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
+ dp_priv->save_link_configuration,
+ sizeof (dp_priv->save_link_configuration));
+}
+
+static uint8_t
+intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
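+
+/*
+ * For example (illustrative), lane 1's requests live in DPCD 0x206:
+ * voltage swing in bits 5:4 and pre-emphasis in bits 7:6; lanes 2 and 3
+ * come from DPCD 0x207.
+ */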
+
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
+ */
+#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
+
+static uint8_t
+intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+
+static void
+intel_get_adjust_train(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ uint8_t train_set[4])
+{
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= I830_DP_VOLTAGE_MAX)
+ v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= intel_dp_pre_emphasis_max(v))
+ p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
+
+static uint32_t
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
+{
+ uint32_t signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+static uint8_t
+intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
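+
+/*
+ * For example (illustrative), lane 0's status is the low nibble of DPCD
+ * 0x202 and lane 3's is the high nibble of DPCD 0x203.
+ */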
+
+/* Check for clock recovery is done on all channels */
+static bool
+intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = intel_dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static bool
+intel_dp_set_link_train(struct intel_output *intel_output,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat,
+ uint8_t train_set[4],
+ bool first)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int ret;
+
+ I915_WRITE(dp_priv->output_reg, dp_reg_value);
+ POSTING_READ(dp_priv->output_reg);
+ if (first)
+ intel_wait_for_vblank(dev);
+
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ ret = intel_dp_aux_native_write(intel_output,
+ DP_TRAINING_LANE0_SET, train_set, 4);
+ if (ret != 4)
+ return false;
+
+ return true;
+}
+
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ bool channel_eq = false;
+ bool first = true;
+ int tries;
+
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_output, 0x100,
+ link_configuration, DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+ DP &= ~DP_LINK_TRAIN_MASK;
+ memset(train_set, 0, 4);
+ voltage = 0xff;
+ tries = 0;
+ clock_recovery = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+ DP_TRAINING_PATTERN_1, train_set, first))
+ break;
+ first = false;
+ /* Set training pattern 1 */
+
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < dp_priv->lane_count; i++)
+ if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == dp_priv->lane_count)
+ break;
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ }
+
+ /* channel equalization */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+ DP_TRAINING_PATTERN_2, train_set,
+ false))
+ break;
+
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times */
+ if (tries > 5)
+ break;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ ++tries;
+ }
+
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ POSTING_READ(dp_priv->output_reg);
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(dp_priv->output_reg);
+}
+
+static void
+intel_dp_restore(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->save_DP & DP_PORT_EN)
+ intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
+ else
+ intel_dp_link_down(intel_output, dp_priv->save_DP);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ */
+
+static void
+intel_dp_check_link_status(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_output->enc.crtc)
+ return;
+
+ if (!intel_dp_get_link_status(intel_output, link_status)) {
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ return;
+ }
+
+ if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+}
+
+/**
+ * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return true if DP port is connected.
+ * \return false if DP port is disconnected.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t temp, bit;
+ enum drm_connector_status status;
+
+ dp_priv->has_audio = false;
+
+ temp = I915_READ(PORT_HOTPLUG_EN);
+
+ I915_WRITE(PORT_HOTPLUG_EN,
+ temp |
+ DPB_HOTPLUG_INT_EN |
+ DPC_HOTPLUG_INT_EN |
+ DPD_HOTPLUG_INT_EN);
+
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ switch (dp_priv->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_INT_STATUS;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_INT_STATUS;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_INT_STATUS;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+
+ if ((temp & bit) == 0)
+ return connector_status_disconnected;
+
+ status = connector_status_disconnected;
+ if (intel_dp_aux_native_read(intel_output,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) {
+ if (dp_priv->dpcd[0] != 0)
+ status = connector_status_connected;
+ }
+ return status;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ /* We should parse the EDID data and find out if it has an audio sink
+ */
+
+ return intel_ddc_get_modes(intel_output);
+}
+
+static void
+intel_dp_destroy (struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->i2c_bus)
+ intel_i2c_destroy(intel_output->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_output);
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .dpms = intel_dp_dpms,
+ .mode_fixup = intel_dp_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_dp_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = intel_dp_save,
+ .restore = intel_dp_restore,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_dp_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_enc_destroy,
+};
+
+void
+intel_dp_hot_plug(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_output);
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+ struct intel_dp_priv *dp_priv;
+
+ intel_output = kcalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+
+ connector = &intel_output->base;
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ dp_priv->intel_output = intel_output;
+ dp_priv->output_reg = output_reg;
+ dp_priv->has_audio = false;
+ dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
+ intel_output->dev_priv = dp_priv;
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+ intel_dp_i2c_init(intel_output,
+ (output_reg == DP_B) ? "DPDDC-B" :
+ (output_reg == DP_C) ? "DPDDC-C" : "DPDDC-D");
+ intel_output->ddc_bus = &dp_priv->adapter;
+ intel_output->hot_plug = intel_dp_hot_plug;
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 000000000000..2b38054d3b6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DP_H_
+#define _INTEL_DP_H_
+
+/* From the VESA DisplayPort spec */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+
+#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
new file mode 100644
index 000000000000..a63b6f57d2d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_i2c.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include "intel_dp.h"
+#include "drmP.h"
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
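+ /* For example (illustrative): a data-byte read with MOT still set
+ * (no STOP) builds the 4-byte request { 0x50, 0x00, 0x50, 0x00 } for
+ * I2C address 0x50 (the usual EDID address) and expects a 2-byte
+ * reply (status + data).
+ */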
+ for (;;) {
+ ret = (*algo_data->aux_ch)(adapter,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG("aux_ch failed %d\n", ret);
+ return ret;
+ }
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG("aux_ch nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG("aux_ch defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for a
+ * write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ error = i2c_add_adapter(adapter);
+ return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd4b9c5f715e..004541c935a8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -54,6 +54,7 @@
#define INTEL_OUTPUT_LVDS 4
#define INTEL_OUTPUT_TVOUT 5
#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -65,7 +66,6 @@ struct intel_i2c_chan {
u32 reg; /* GPIO reg */
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
- u8 slave_addr;
};
struct intel_framebuffer {
@@ -79,11 +79,12 @@ struct intel_output {
struct drm_encoder enc;
int type;
- struct intel_i2c_chan *i2c_bus; /* for control functions */
- struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
+ struct i2c_adapter *i2c_bus;
+ struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
+ void (*hot_plug)(struct intel_output *);
};
struct intel_crtc {
@@ -104,9 +105,9 @@ struct intel_crtc {
#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct intel_i2c_chan *chan);
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name);
+void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
@@ -116,6 +117,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1ee3007d6ec0..13bff20930e8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev)
{
struct intel_output *intel_output;
struct intel_dvo_device *dvo;
- struct intel_i2c_chan *i2cbus = NULL;
+ struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
- int gpio_inited = 0;
int encoder_type = DRM_MODE_ENCODER_NONE;
intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
@@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev)
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (gpio_inited != gpio) {
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
- gpio_inited = gpio;
+ if (i2cbus != NULL)
+ intel_i2c_destroy(i2cbus);
+ if (!(i2cbus = intel_i2c_create(dev, gpio,
+ gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+ continue;
}
if (dvo->dev_ops!= NULL)
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 1af7d68e3807..1d30802e773e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -453,7 +453,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
size = ALIGN(size, PAGE_SIZE);
fbo = drm_gem_object_alloc(dev, size);
if (!fbo) {
- printk(KERN_ERR "failed to allocate framebuffer\n");
+ DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
@@ -610,8 +610,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
par->dev = dev;
/* To allow resizing without swapping buffers */
- printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
- intel_fb->base.height, obj_priv->gtt_offset, fbo);
+ DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
+ intel_fb->base.height, obj_priv->gtt_offset, fbo);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -698,13 +698,13 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *
} else
intelfb_set_par(info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ DRM_INFO("fb%d: %s frame buffer device\n", info->node,
info->fix.id);
/* Switch back to kernel console on panic */
kernelfb_mode = *modeset;
atomic_notifier_chain_register(&panic_notifier_list, &paniced);
- printk(KERN_INFO "registered panic notifier\n");
+ DRM_DEBUG("registered panic notifier\n");
return 0;
}
@@ -852,13 +852,13 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
} else
intelfb_set_par(info);
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ DRM_INFO("fb%d: %s frame buffer device\n", info->node,
info->fix.id);
/* Switch back to kernel console on panic */
kernelfb_mode = *modeset;
atomic_notifier_chain_register(&panic_notifier_list, &paniced);
- printk(KERN_INFO "registered panic notifier\n");
+ DRM_DEBUG("registered panic notifier\n");
return 0;
}
@@ -872,8 +872,8 @@ void intelfb_restore(void)
{
int ret;
if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) {
- printk(KERN_ERR "Failed to restore crtc configuration: %d\n",
- ret);
+ DRM_ERROR("Failed to restore crtc configuration: %d\n",
+ ret);
}
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4ea2a651b92c..9e30daae37dc 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox = SDVO_ENCODING_HDMI |
SDVO_BORDER_ENABLE |
SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH |
- SDVO_NULL_PACKETS_DURING_VSYNC;
+ SDVO_HSYNC_ACTIVE_HIGH;
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -129,20 +129,26 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void
-intel_hdmi_sink_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_hdmi_edid_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- if (edid != NULL) {
- hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
- kfree(edid);
+ intel_output->ddc_bus);
+ hdmi_priv->has_hdmi_sink = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ }
intel_output->base.display_info.raw_edid = NULL;
+ kfree(edid);
}
+ return status;
}
static enum drm_connector_status
@@ -154,11 +160,7 @@ igdng_hdmi_detect(struct drm_connector *connector)
/* FIXME hotplug detect */
hdmi_priv->has_hdmi_sink = false;
- intel_hdmi_sink_detect(connector);
- if (hdmi_priv->has_hdmi_sink)
- return connector_status_connected;
- else
- return connector_status_disconnected;
+ return intel_hdmi_edid_detect(connector);
}
static enum drm_connector_status
@@ -201,10 +203,9 @@ intel_hdmi_detect(struct drm_connector *connector)
return connector_status_unknown;
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
- intel_hdmi_sink_detect(connector);
- return connector_status_connected;
- } else
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+ return intel_hdmi_edid_detect(connector);
+ else
return connector_status_disconnected;
}
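
The HDMI change above folds sink detection and connection status into one EDID-based helper: a readable EDID with the digital-input bit set means connected, and HDMI-sink capability is derived from the same EDID. A minimal stand-alone sketch of that decision flow follows; the struct, the EDID bit value and the detect helper are simplified stand-ins, not the real DRM definitions.

/* Sketch of the EDID-based detect logic; types and helpers are stand-ins,
 * not the real DRM definitions. */
#include <stdbool.h>
#include <stdio.h>

#define EDID_INPUT_DIGITAL 0x80            /* bit 7 of the EDID input byte */

enum connector_status { STATUS_CONNECTED, STATUS_DISCONNECTED };

struct fake_edid {
	unsigned char input;               /* video input definition byte */
	bool has_hdmi_vsdb;                /* stands in for the CEA HDMI block */
};

/* Stand-in for drm_detect_hdmi_monitor(): true if the EDID carries an
 * HDMI vendor-specific data block. */
static bool detect_hdmi_monitor(const struct fake_edid *edid)
{
	return edid->has_hdmi_vsdb;
}

static enum connector_status edid_detect(const struct fake_edid *edid,
					 bool *has_hdmi_sink)
{
	enum connector_status status = STATUS_DISCONNECTED;

	*has_hdmi_sink = false;            /* assume DVI until proven HDMI */
	if (edid && (edid->input & EDID_INPUT_DIGITAL)) {
		status = STATUS_CONNECTED;
		*has_hdmi_sink = detect_hdmi_monitor(edid);
	}
	return status;
}

int main(void)
{
	struct fake_edid edid = { .input = EDID_INPUT_DIGITAL, .has_hdmi_vsdb = true };
	bool hdmi;

	printf("status=%d hdmi_sink=%d\n", edid_detect(&edid, &hdmi), hdmi);
	return 0;
}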
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index f7061f68d050..62b8bead7652 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,6 +124,7 @@ static void set_data(void *data, int state_high)
* @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
+ * @slave_addr: slave address (if fixed)
*
* Creates and registers a new i2c bus with the Linux i2c layer, for use
* in output probing and control (e.g. DDC or SDVO control functions).
@@ -139,8 +140,8 @@ static void set_data(void *data, int state_high)
* %GPIOH
* see PRM for details on how these different busses are used.
*/
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name)
{
struct intel_i2c_chan *chan;
@@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
intel_i2c_quirk_set(dev, false);
udelay(20);
- return chan;
+ return &chan->adapter;
out_free:
kfree(chan);
@@ -187,11 +188,16 @@ out_free:
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
-void intel_i2c_destroy(struct intel_i2c_chan *chan)
+void intel_i2c_destroy(struct i2c_adapter *adapter)
{
- if (!chan)
+ struct intel_i2c_chan *chan;
+
+ if (!adapter)
return;
+ chan = container_of(adapter,
+ struct intel_i2c_chan,
+ adapter);
i2c_del_adapter(&chan->adapter);
kfree(chan);
}
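
intel_i2c_create()/intel_i2c_destroy() now expose a bare struct i2c_adapter while the driver keeps its private channel wrapped around it, recovering the wrapper with container_of() on destroy. Below is a small user-space sketch of that embed-and-recover pattern; the structure and function names are illustrative only.

/* Sketch of the "embed the generic object, recover the wrapper" pattern
 * used by intel_i2c_create()/intel_i2c_destroy(). Compiles stand-alone. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct adapter {                  /* stands in for struct i2c_adapter */
	const char *name;
};

struct chan {                     /* stands in for struct intel_i2c_chan */
	unsigned int gpio_reg;
	struct adapter adapter;   /* embedded generic object */
};

static struct adapter *chan_create(unsigned int reg, const char *name)
{
	struct chan *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->gpio_reg = reg;
	c->adapter.name = name;
	return &c->adapter;       /* callers only see the embedded adapter */
}

static void chan_destroy(struct adapter *adapter)
{
	struct chan *c;

	if (!adapter)
		return;
	c = container_of(adapter, struct chan, adapter);
	printf("destroying channel on reg 0x%x (%s)\n", c->gpio_reg, adapter->name);
	free(c);
}

int main(void)
{
	struct adapter *a = chan_create(0x5010, "DVOI2C_E");

	chan_destroy(a);
	return 0;
}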
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f073ed8432e8..9ab38efffecf 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -36,9 +36,25 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include <linux/acpi.h>
#define I915_LVDS "i915_lvds"
+/*
+ * the following four scaling options are defined.
+ * #define DRM_MODE_SCALE_NON_GPU 0
+ * #define DRM_MODE_SCALE_FULLSCREEN 1
+ * #define DRM_MODE_SCALE_NO_SCALE 2
+ * #define DRM_MODE_SCALE_ASPECT 3
+ */
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_priv {
+ int fitting_mode;
+ u32 pfit_control;
+ u32 pfit_pgm_ratios;
+};
+
/**
* Sets the backlight level.
*
@@ -213,26 +229,45 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ /*
+ * Floating-point operation is not supported, so PANEL_RATIO_FACTOR
+ * is defined to avoid floating-point computation when calculating
+ * the panel ratio.
+ */
+#define PANEL_RATIO_FACTOR 8192
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0;
+ int left_border = 0, right_border = 0, top_border = 0;
+ int bottom_border = 0;
+ bool border = 0;
+ int panel_ratio, desired_ratio, vert_scale, horiz_scale;
+ int horiz_ratio, vert_ratio;
+ u32 hsync_width, vsync_width;
+ u32 hblank_width, vblank_width;
+ u32 hsync_pos, vsync_pos;
/* Should never happen!! */
if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
- printk(KERN_ERR "Can't support LVDS on pipe A\n");
+ DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
/* Should never happen!! */
list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
- printk(KERN_ERR "Can't enable LVDS and another "
+ DRM_ERROR("Can't enable LVDS and another "
"encoder on the same pipe\n");
return false;
}
}
-
+ /* If we don't have a panel mode, there is nothing we can do */
+ if (dev_priv->panel_fixed_mode == NULL)
+ return true;
/*
* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -256,6 +291,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
+ /* Make sure pre-965s set dither correctly */
+ if (!IS_I965G(dev)) {
+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ }
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay) {
+ pfit_pgm_ratios = 0;
+ border = 0;
+ goto out;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ if (IS_I965G(dev))
+ pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY;
+
+ hsync_width = adjusted_mode->crtc_hsync_end -
+ adjusted_mode->crtc_hsync_start;
+ vsync_width = adjusted_mode->crtc_vsync_end -
+ adjusted_mode->crtc_vsync_start;
+ hblank_width = adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start;
+ vblank_width = adjusted_mode->crtc_vblank_end -
+ adjusted_mode->crtc_vblank_start;
+ /*
+ * Deal with panel fitting options. Figure out how to stretch the
+ * image based on its aspect ratio & the current panel fitting mode.
+ */
+ panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
+ adjusted_mode->vdisplay;
+ desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
+ mode->vdisplay;
+ /*
+ * Enable automatic panel scaling for non-native modes so that they fill
+ * the screen. Should be enabled before the pipe is enabled, according
+ * to register description and PRM.
+ * Change the value here to see the borders for debugging
+ */
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+
+ switch (lvds_priv->fitting_mode) {
+ case DRM_MODE_SCALE_NO_SCALE:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1)
+ right_border++;
+ top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ /* Set active & border values */
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ /* Keep the border even */
+ if (right_border & 1)
+ right_border++;
+ /* use the border directly instead of border minus one */
+ adjusted_mode->crtc_hblank_start = mode->hdisplay +
+ right_border;
+ /* keep the blank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start + hblank_width;
+ /* get the hsync pos relative to hblank start */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep the hsync pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start + hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start + hsync_width;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
+ /* use the border instead of border minus one */
+ adjusted_mode->crtc_vblank_start = mode->vdisplay +
+ bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start + vblank_width;
+ /* get the vsync start position relative to vblank start */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start + vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vblank_start + vsync_width;
+ border = 1;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev)) {
+ /* 965+ is easy, it does everything in hw */
+ if (panel_ratio > desired_ratio)
+ pfit_control |= PFIT_SCALING_PILLAR;
+ else if (panel_ratio < desired_ratio)
+ pfit_control |= PFIT_SCALING_LETTER;
+ else
+ pfit_control |= PFIT_SCALING_AUTO;
+ } else {
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ u32 horiz_bits, vert_bits, bits = 12;
+ horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->hdisplay;
+ vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->vdisplay;
+ horiz_scale = adjusted_mode->hdisplay *
+ PANEL_RATIO_FACTOR / mode->hdisplay;
+ vert_scale = adjusted_mode->vdisplay *
+ PANEL_RATIO_FACTOR / mode->vdisplay;
+
+ /* retain aspect ratio */
+ if (panel_ratio > desired_ratio) { /* Pillar */
+ u32 scaled_width;
+ scaled_width = mode->hdisplay * vert_scale /
+ PANEL_RATIO_FACTOR;
+ horiz_ratio = vert_ratio;
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Pillar will have left/right borders */
+ left_border = (adjusted_mode->hdisplay -
+ scaled_width) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1) /* odd resolutions */
+ right_border++;
+ /* keep the border even */
+ if (right_border & 1)
+ right_border++;
+ adjusted_mode->crtc_hdisplay = scaled_width;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_hblank_start =
+ scaled_width + right_border;
+ /* keep the hblank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start +
+ hblank_width;
+ /*
+ * get the hsync start pos relative to
+ * hblank start
+ */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep the hsync_pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start +
+ hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start +
+ hsync_width;
+ border = 1;
+ } else if (panel_ratio < desired_ratio) { /* letter */
+ u32 scaled_height = mode->vdisplay *
+ horiz_scale / PANEL_RATIO_FACTOR;
+ vert_ratio = horiz_ratio;
+ pfit_control |= (HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Letterbox will have top/bottom border */
+ top_border = (adjusted_mode->vdisplay -
+ scaled_height) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ adjusted_mode->crtc_vdisplay = scaled_height;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_vblank_start =
+ scaled_height + bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start +
+ vblank_width;
+ /*
+ * get the vsync start pos relative to
+ * vblank start
+ */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start +
+ vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vsync_start +
+ vsync_width;
+ border = 1;
+ } else {
+ /* Aspect ratios match, let hw scale both directions */
+ pfit_control |= (VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ horiz_bits = (1 << bits) * horiz_ratio /
+ PANEL_RATIO_FACTOR;
+ vert_bits = (1 << bits) * vert_ratio /
+ PANEL_RATIO_FACTOR;
+ pfit_pgm_ratios =
+ ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
+ PFIT_VERT_SCALE_MASK) |
+ ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
+ PFIT_HORIZ_SCALE_MASK);
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev))
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ break;
+ default:
+ break;
+ }
+
+out:
+ lvds_priv->pfit_control = pfit_control;
+ lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@@ -301,8 +573,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 pfit_control;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -319,22 +591,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- if (mode->hdisplay != adjusted_mode->hdisplay ||
- mode->vdisplay != adjusted_mode->vdisplay)
- pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
- HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- else
- pfit_control = 0;
-
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
- else
- pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
-
- I915_WRITE(PFIT_CONTROL, pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
/**
@@ -406,6 +664,34 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
+ struct drm_device *dev = connector->dev;
+ struct intel_output *intel_output =
+ to_intel_output(connector);
+
+ if (property == dev->mode_config.scaling_mode_property &&
+ connector->encoder) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ if (value == DRM_MODE_SCALE_NON_GPU) {
+ DRM_DEBUG_KMS(I915_LVDS,
+ "non_GPU property is unsupported\n");
+ return 0;
+ }
+ if (lvds_priv->fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+ }
+ lvds_priv->fitting_mode = value;
+ if (crtc && crtc->enabled) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+ }
+
return 0;
}
@@ -456,7 +742,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
},
},
@@ -464,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core 2 series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
},
},
@@ -503,6 +789,65 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
+#ifdef CONFIG_ACPI
+/*
+ * check_lid_device -- check whether @handle is an ACPI LID device.
+ * @handle: ACPI device handle
+ * @level : depth in the ACPI namespace tree
+ * @context: pointer to the LID-present flag, set when a LID device is found
+ * @rv: a return value to fill if desired (not used)
+ */
+static acpi_status
+check_lid_device(acpi_handle handle, u32 level, void *context,
+ void **return_value)
+{
+ struct acpi_device *acpi_dev;
+ int *lid_present = context;
+
+ acpi_dev = NULL;
+ /* Get the acpi device for device handle */
+ if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
+ /* If there is no ACPI device for handle, return */
+ return AE_OK;
+ }
+
+ if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
+ *lid_present = 1;
+
+ return AE_OK;
+}
+
+/**
+ * Check whether an ACPI LID device exists by enumerating the ACPI
+ * device tree.
+ */
+static int intel_lid_present(void)
+{
+ int lid_present = 0;
+
+ if (acpi_disabled) {
+ /* If ACPI is disabled, there is no ACPI device tree to
+ * check, so assume the LID device would have been present.
+ */
+ return 1;
+ }
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ check_lid_device, &lid_present, NULL);
+
+ return lid_present;
+}
+#else
+static int intel_lid_present(void)
+{
+ /* If ACPI support is not built in, assume that the LID device would
+ * have been present.
+ */
+ return 1;
+}
+#endif
+
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -518,6 +863,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
+ struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -525,13 +871,24 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
+ /* Assume that any device without an ACPI LID device also doesn't
+ * have an integrated LVDS. We would be better off parsing the BIOS
+ * to get a reliable indicator, but that code isn't written yet.
+ *
+ * In the case of all-in-one desktops using LVDS that we've seen,
+ * they're using SDVO LVDS.
+ */
+ if (!intel_lid_present())
+ return;
+
if (IS_IGDNG(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
gpio = PCH_GPIOC;
}
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ intel_output = kzalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_lvds_priv), GFP_KERNEL);
if (!intel_output) {
return;
}
@@ -553,7 +910,18 @@ void intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
+ intel_output->dev_priv = lvds_priv;
+ /* create the scaling mode property */
+ drm_mode_create_scaling_mode_property(dev);
+ /*
+ * the initial panel fitting mode will be FULL_SCREEN.
+ */
+ drm_connector_attach_property(&intel_output->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -649,5 +1017,5 @@ failed:
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(intel_output);
}
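
The panel-fitting code above avoids floating point by scaling all ratios with PANEL_RATIO_FACTOR (8192) and, on pre-965 parts, packing 12-bit horizontal/vertical ratios into the programmed-ratio register. Below is a stand-alone sketch of that arithmetic for a 1024x768 mode on a 1280x800 panel; the shift and mask values are illustrative, not the real PFIT_PGM_RATIOS layout.

/* Fixed-point panel-fitting ratio sketch: scale a 1024x768 mode onto a
 * 1280x800 panel. The shifts/masks are illustrative, not the i915 ones. */
#include <stdint.h>
#include <stdio.h>

#define PANEL_RATIO_FACTOR 8192           /* fixed-point scale, no floats */
#define RATIO_BITS         12             /* programmed ratio precision */
#define VERT_SCALE_SHIFT   16             /* illustrative register layout */
#define HORIZ_SCALE_SHIFT  0
#define SCALE_MASK         0xfff

int main(void)
{
	unsigned int panel_w = 1280, panel_h = 800;   /* adjusted (native) mode */
	unsigned int mode_w = 1024, mode_h = 768;     /* requested mode */

	unsigned int panel_ratio   = panel_w * PANEL_RATIO_FACTOR / panel_h;
	unsigned int desired_ratio = mode_w * PANEL_RATIO_FACTOR / mode_h;

	/* source-to-panel ratios, as the pre-965 path computes them */
	unsigned int horiz_ratio = mode_w * PANEL_RATIO_FACTOR / panel_w;
	unsigned int vert_ratio  = mode_h * PANEL_RATIO_FACTOR / panel_h;

	/* pack 12-bit ratios into a single programmed-ratio word */
	uint32_t horiz_bits = (1u << RATIO_BITS) * horiz_ratio / PANEL_RATIO_FACTOR;
	uint32_t vert_bits  = (1u << RATIO_BITS) * vert_ratio  / PANEL_RATIO_FACTOR;
	uint32_t pgm_ratios = ((vert_bits << VERT_SCALE_SHIFT) & ((uint32_t)SCALE_MASK << VERT_SCALE_SHIFT)) |
			      ((horiz_bits << HORIZ_SCALE_SHIFT) & SCALE_MASK);

	printf("panel_ratio=%u desired_ratio=%u (panel is %s)\n",
	       panel_ratio, desired_ratio,
	       panel_ratio > desired_ratio ? "wider -> pillarbox" : "narrower or equal");
	printf("pgm_ratios=0x%08x\n", (unsigned)pgm_ratios);
	return 0;
}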
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index e0910fefce87..67e2f4632a24 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
}
};
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
-
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (ret == 2)
return true;
@@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (edid) {
drm_mode_connector_update_edid_property(&intel_output->base,
edid);
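
intel_ddc_probe() now drives the stored i2c_adapter directly: it writes the EDID offset and reads one byte back, and treats the transfer as successful only when both messages complete. A stand-alone sketch of that probe shape follows; the message struct, the DDC address and the transfer call are stand-ins for the kernel i2c API.

/* Sketch of the DDC probe shape used by intel_ddc_probe(): write the EDID
 * offset, then read one byte back from DDC address 0x50. Stand-in types. */
#include <stdint.h>
#include <stdio.h>

#define I2C_M_RD 0x0001
#define DDC_ADDR 0x50

struct msg {                              /* stands in for struct i2c_msg */
	uint16_t addr, flags, len;
	uint8_t *buf;
};

/* Stand-in for i2c_transfer(): pretends both messages completed. */
static int fake_transfer(struct msg *msgs, int num)
{
	(void)msgs;
	return num;
}

int main(void)
{
	uint8_t out = 0x0;                /* EDID offset to start reading from */
	uint8_t in = 0;
	struct msg msgs[2] = {
		{ .addr = DDC_ADDR, .flags = 0,        .len = 1, .buf = &out },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = 1, .buf = &in  },
	};

	/* success means the adapter completed both messages */
	int ret = fake_transfer(msgs, 2);
	printf("probe %s\n", ret == 2 ? "succeeded" : "failed");
	return 0;
}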
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9a00adb3a508..4f0c30948bc4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -38,8 +38,7 @@
#undef SDVO_DEBUG
#define I915_SDVO "i915_sdvo"
struct intel_sdvo_priv {
- struct intel_i2c_chan *i2c_bus;
- int slaveaddr;
+ u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
int output_device;
@@ -69,12 +68,23 @@ struct intel_sdvo_priv {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
+
/**
* This is set if we detect output of sdvo device as LVDS.
*/
bool is_lvds;
/**
+ * This is sdvo flags for input timing.
+ */
+ uint8_t sdvo_flags;
+
+ /**
+ * This is the SDVO fixed panel mode pointer
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /**
* Returned SDTV resolutions allowed for the current format, if the
* device reported it.
*/
@@ -146,13 +156,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
@@ -162,7 +172,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
out_buf[0] = addr;
out_buf[1] = 0;
- if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -175,10 +185,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
u8 ch)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
u8 out_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_output->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -188,7 +199,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
+ if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
{
return true;
}
@@ -592,6 +603,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
+ struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
uint8_t status;
memset(&args, 0, sizeof(args));
@@ -599,7 +611,12 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
args.width = width;
args.height = height;
args.interlace = 0;
- args.scaled = 0;
+
+ if (sdvo_priv->is_lvds &&
+ (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
+ sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
&args, sizeof(args));
status = intel_sdvo_read_response(output, NULL, 0);
@@ -944,12 +961,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct intel_output *output = enc_to_intel_output(encoder);
struct intel_sdvo_priv *dev_priv = output->dev_priv;
- if (!dev_priv->is_tv) {
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
- } else {
+ if (dev_priv->is_tv) {
struct intel_sdvo_dtd output_dtd;
bool success;
@@ -980,6 +992,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
intel_sdvo_get_preferred_input_timing(output,
&input_dtd);
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -990,6 +1003,52 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
} else {
return false;
}
+ } else if (dev_priv->is_lvds) {
+ struct intel_sdvo_dtd output_dtd;
+ bool success;
+
+ drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
+ /* Set output timings */
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ dev_priv->sdvo_lvds_fixed_mode);
+
+ intel_sdvo_set_target_output(output,
+ dev_priv->controlled_output);
+ intel_sdvo_set_output_timing(output, &output_dtd);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(output, true, false);
+
+
+ success = intel_sdvo_create_preferred_input_timing(
+ output,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay);
+
+ if (success) {
+ struct intel_sdvo_dtd input_dtd;
+
+ intel_sdvo_get_preferred_input_timing(output,
+ &input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ mode->clock = adjusted_mode->clock;
+
+ adjusted_mode->clock *=
+ intel_sdvo_get_pixel_multiplier(mode);
+ } else {
+ return false;
+ }
+
+ } else {
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will be told of the multiplier during mode_set.
+ */
+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
}
return true;
}
@@ -1033,15 +1092,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* We have tried to get input timing in mode_fixup, and filled into
adjusted_mode */
- if (sdvo_priv->is_tv)
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- else
+ input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
+ } else
intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
/* If it's a TV, we already set the output timing in mode_fixup.
* Otherwise, the output timing is equal to the input timing.
*/
- if (!sdvo_priv->is_tv) {
+ if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
intel_sdvo_set_target_output(output,
sdvo_priv->controlled_output);
@@ -1116,6 +1176,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
+ if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
+ sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(output, sdvox);
}
@@ -1276,6 +1338,17 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
if (sdvo_priv->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
+ if (sdvo_priv->is_lvds == true) {
+ if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
+ return MODE_PANEL;
+
+ if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
return MODE_OK;
}
@@ -1369,9 +1442,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct edid *edid = NULL;
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
+ intel_output->ddc_bus);
if (edid != NULL) {
sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
kfree(edid);
@@ -1549,23 +1621,21 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_display_mode *newmode;
/*
* Attempt to get the mode list from DDC.
* Assume that the preferred modes are
* arranged in priority order.
*/
- /* set the bus switch and get the modes */
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
intel_ddc_get_modes(intel_output);
if (list_empty(&connector->probed_modes) == false)
- return;
+ goto end;
/* Fetch modes from VBT */
if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
- struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(connector->dev,
dev_priv->sdvo_lvds_vbt_mode);
if (newmode != NULL) {
@@ -1575,6 +1645,16 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
drm_mode_probed_add(connector, newmode);
}
}
+
+end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ sdvo_priv->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+ break;
+ }
+ }
+
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1597,14 +1677,20 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
static void intel_sdvo_destroy(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
if (intel_output->i2c_bus)
intel_i2c_destroy(intel_output->i2c_bus);
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(connector->dev,
+ sdvo_priv->sdvo_lvds_fixed_mode);
+
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
+
kfree(intel_output);
}
@@ -1709,7 +1795,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
- if (to_intel_output(connector)->ddc_bus == chan) {
+ if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
intel_output = to_intel_output(connector);
break;
}
@@ -1723,7 +1809,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
- struct i2c_algorithm *algo;
+ const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
intel_output =
@@ -1733,7 +1819,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
return -EINVAL;
sdvo_priv = intel_output->dev_priv;
- algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
+ algo = intel_output->i2c_bus->algo;
intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
@@ -1785,13 +1871,11 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
- struct intel_i2c_chan *i2cbus = NULL;
- struct intel_i2c_chan *ddcbus = NULL;
+
int connector_type;
u8 ch[0x40];
int i;
- int encoder_type, output_id;
- u8 slave_addr;
+ int encoder_type;
intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_output) {
@@ -1799,29 +1883,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
}
sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
+ sdvo_priv->output_device = output_device;
+
+ intel_output->dev_priv = sdvo_priv;
intel_output->type = INTEL_OUTPUT_SDVO;
/* setup the DDC bus. */
if (output_device == SDVOB)
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
else
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
- if (!i2cbus)
+ if (!intel_output->i2c_bus)
goto err_inteloutput;
- slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- sdvo_priv->i2c_bus = i2cbus;
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- if (output_device == SDVOB) {
- output_id = 1;
- } else {
- output_id = 2;
- }
- sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
- sdvo_priv->output_device = output_device;
- intel_output->i2c_bus = i2cbus;
- intel_output->dev_priv = sdvo_priv;
+ /* Save the bit-banging i2c functionality for use by the DDC wrapper */
+ intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
@@ -1835,17 +1914,15 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
/* setup the DDC bus. */
if (output_device == SDVOB)
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
else
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- if (ddcbus == NULL)
+ if (intel_output->ddc_bus == NULL)
goto err_i2c;
- intel_sdvo_i2c_bit_algo.functionality =
- intel_output->i2c_bus->adapter.algo->functionality;
- ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
- intel_output->ddc_bus = ddcbus;
+ /* Wrap with our custom algo which switches to DDC mode */
+ intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
/* In default case sdvo lvds is false */
sdvo_priv->is_lvds = false;
@@ -1965,9 +2042,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
return true;
err_i2c:
- if (ddcbus != NULL)
+ if (intel_output->ddc_bus != NULL)
intel_i2c_destroy(intel_output->ddc_bus);
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_output->i2c_bus != NULL)
+ intel_i2c_destroy(intel_output->i2c_bus);
err_inteloutput:
kfree(intel_output);
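
The SDVO rework keeps the raw 8-bit slave address from the BIOS in sdvo_priv->slave_addr and shifts it right by one to get the 7-bit address that i2c messages expect. Below is a stand-alone sketch of building the register-read message pair with that conversion; the types and the example address are stand-ins.

/* Sketch of the SDVO register-read transfer: the 8-bit BIOS slave address
 * is converted to the 7-bit i2c address with a right shift. Stand-in types. */
#include <stdint.h>
#include <stdio.h>

#define I2C_M_RD 0x0001                   /* read flag, as in <linux/i2c.h> */

struct msg {                              /* stands in for struct i2c_msg */
	uint16_t addr;                    /* 7-bit slave address */
	uint16_t flags;
	uint16_t len;
	uint8_t *buf;
};

int main(void)
{
	uint8_t slave_addr = 0x70;        /* example 8-bit address from the VBT */
	uint8_t out = 0x0f;               /* SDVO register to read */
	uint8_t in = 0;

	struct msg msgs[2] = {
		{ .addr = slave_addr >> 1, .flags = 0,        .len = 1, .buf = &out },
		{ .addr = slave_addr >> 1, .flags = I2C_M_RD, .len = 1, .buf = &in  },
	};

	/* a real driver would now call i2c_transfer(adapter, msgs, 2) and
	 * treat a return value of 2 as success */
	printf("write to 0x%02x, read from 0x%02x\n",
	       (unsigned)msgs[0].addr, (unsigned)msgs[1].addr);
	return 0;
}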
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 193938b7d7f9..ba5cdf8ae40b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -715,6 +715,7 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_HBUF_TX_ONCE (2 << 6)
#define SDVO_HBUF_TX_VSYNC (3 << 6)
#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
struct intel_sdvo_encode{
u8 dvi_rev;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea68992e4416..a43c98e3f077 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
/*
* Detect TV by polling
*/
- if (intel_output->load_detect_temp) {
- /* TV not currently running, prod it with destructive detect */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
- tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
- tv_dac |= (TVDAC_STATE_CHG_EN |
- TVDAC_A_SENSE_CTL |
- TVDAC_B_SENSE_CTL |
- TVDAC_C_SENSE_CTL |
- DAC_CTL_OVERRIDE |
- DAC_A_0_7_V |
- DAC_B_0_7_V |
- DAC_C_0_7_V);
- I915_WRITE(TV_CTL, tv_ctl);
- I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev);
- tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev);
- }
+ save_tv_dac = tv_dac;
+ tv_ctl = I915_READ(TV_CTL);
+ save_tv_ctl = tv_ctl;
+ tv_ctl &= ~TV_ENC_ENABLE;
+ tv_ctl &= ~TV_TEST_MODE_MASK;
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ tv_dac &= ~TVDAC_SENSE_MASK;
+ tv_dac &= ~DAC_A_MASK;
+ tv_dac &= ~DAC_B_MASK;
+ tv_dac &= ~DAC_C_MASK;
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ intel_wait_for_vblank(dev);
+ tv_dac = I915_READ(TV_DAC);
+ I915_WRITE(TV_DAC, save_tv_dac);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ intel_wait_for_vblank(dev);
/*
* A B C
* 0 1 1 Composite
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f30aa7274a54..f97563db4e59 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -35,6 +35,23 @@
#include "atom.h"
/*
+ * Clear GPU surface registers.
+ */
+static void radeon_surface_init(struct radeon_device *rdev)
+{
+ /* FIXME: check this out */
+ if (rdev->family < CHIP_R600) {
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ WREG32(RADEON_SURFACE0_INFO +
+ i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
+ 0);
+ }
+ }
+}
+
+/*
* GPU scratch registers helpers function.
*/
static void radeon_scratch_init(struct radeon_device *rdev)
@@ -496,6 +513,8 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_errata(rdev);
/* Initialize scratch registers */
radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
@@ -604,9 +623,6 @@ int radeon_device_init(struct radeon_device *rdev,
if (r) {
return r;
}
- if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
- rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
- }
if (!ret) {
DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 09c9fb9f6210..84ba69f48784 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -345,7 +345,7 @@ static void __exit radeon_exit(void)
drm_exit(driver);
}
-late_initcall(radeon_init);
+module_init(radeon_init);
module_exit(radeon_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fa86d398945e..9e8f191eb64a 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -478,14 +478,16 @@ int radeonfb_create(struct radeon_device *rdev,
{
struct fb_info *info;
struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL;
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_object *robj = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
+ u64 fb_gpuaddr;
void *fbptr = NULL;
+ unsigned long tmp;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
@@ -498,11 +500,12 @@ int radeonfb_create(struct radeon_device *rdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- false, &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ false, &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer\n");
+ printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
+ surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
@@ -515,12 +518,19 @@ int radeonfb_create(struct radeon_device *rdev,
ret = -ENOMEM;
goto out_unref;
}
+ ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ printk(KERN_ERR "failed to pin framebuffer\n");
+ ret = -ENOMEM;
+ goto out_unref;
+ }
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
rfb = to_radeon_framebuffer(fb);
*rfb_p = rfb;
rdev->fbdev_rfb = rfb;
+ rdev->fbdev_robj = robj;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -541,13 +551,13 @@ int radeonfb_create(struct radeon_device *rdev,
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_I830;
+ info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
info->fix.line_length = fb->pitch;
- info->screen_base = fbptr;
- info->fix.smem_start = (unsigned long)fbptr;
+ tmp = fb_gpuaddr - rdev->mc.vram_location;
+ info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
info->screen_size = size;
@@ -562,8 +572,8 @@ int radeonfb_create(struct radeon_device *rdev,
info->var.width = -1;
info->var.xres = fb_width;
info->var.yres = fb_height;
- info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
- info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
@@ -644,7 +654,7 @@ out_unref:
if (robj) {
radeon_object_kunmap(robj);
}
- if (ret) {
+ if (fb && ret) {
list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
@@ -813,6 +823,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
robj = rfb->obj->driver_private;
unregister_framebuffer(info);
radeon_object_kunmap(robj);
+ radeon_object_unpin(robj);
framebuffer_release(info);
}
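
Instead of publishing the kernel mapping address, the radeon fbdev path above pins the framebuffer object in VRAM and derives the CPU-visible fix.smem_start from the pin result: aperture base plus the object's offset into VRAM. A tiny sketch of that arithmetic with made-up example addresses.

/* Sketch of the smem_start computation after pinning the fb object.
 * The addresses below are made-up example values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fb_gpuaddr    = 0x00200000;  /* GPU address returned by the pin */
	uint64_t vram_location = 0x00000000;  /* start of VRAM in the GPU space */
	uint64_t aper_base     = 0xd0000000;  /* PCI aperture base (CPU view) */

	uint64_t offset_in_vram = fb_gpuaddr - vram_location;
	uint64_t smem_start = aper_base + offset_in_vram;

	printf("smem_start = 0x%llx\n", (unsigned long long)smem_start);
	return 0;
}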
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 983e8df5e000..bac0d06c52ac 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -223,7 +223,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
{
uint32_t flags;
uint32_t tmp;
- void *fbptr;
int r;
flags = radeon_object_flags_from_domain(domain);
@@ -242,10 +241,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
return r;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
tmp = robj->tobj.mem.placement;
ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
@@ -261,23 +256,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
DRM_ERROR("radeon: failed to pin object.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
return r;
}
void radeon_object_unpin(struct radeon_object *robj)
{
uint32_t flags;
- void *fbptr;
int r;
spin_lock(&robj->tobj.lock);
@@ -297,10 +281,6 @@ void radeon_object_unpin(struct radeon_object *robj)
DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
return;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
flags = robj->tobj.mem.placement;
robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
r = ttm_buffer_object_validate(&robj->tobj,
@@ -310,16 +290,6 @@ void radeon_object_unpin(struct radeon_object *robj)
DRM_ERROR("radeon: failed to unpin buffer.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
}
int radeon_object_wait(struct radeon_object *robj)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 517c84559633..bdec583901eb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -34,7 +34,6 @@
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 27b146c54fbc..40b75032ea47 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -32,7 +32,6 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
-#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0331fa74cd3f..75dc8bd24592 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3c259ee7ddda..8206442fbabd 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -326,6 +326,16 @@ config I2C_DAVINCI
devices such as DaVinci NIC.
For details please see http://www.ti.com/davinci
+config I2C_DESIGNWARE
+ tristate "Synopsys DesignWare"
+ depends on HAVE_CLK
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-designware.
+
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
depends on GENERIC_GPIO
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index edeabf003106..e654263bfc01 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
+obj-$(CONFIG_I2C_DESIGNWARE) += i2c-designware.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b5db8b883615..9c2e10082b79 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -140,7 +140,7 @@ static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
dev_dbg(&adap->dev, "Interrupt: %x\n", i);
- wake_up_interruptible(&cpm->i2c_wait);
+ wake_up(&cpm->i2c_wait);
return i ? IRQ_HANDLED : IRQ_NONE;
}
@@ -364,12 +364,12 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(&adap->dev, "test ready.\n");
pmsg = &msgs[tptr];
if (pmsg->flags & I2C_M_RD)
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
!(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
1 * HZ);
else
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
!(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_READY),
1 * HZ);
if (ret == 0) {
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c
new file mode 100644
index 000000000000..b444762e9b9f
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware.c
@@ -0,0 +1,624 @@
+/*
+ * Synopsys Designware I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * Registers offset
+ */
+#define DW_IC_CON 0x0
+#define DW_IC_TAR 0x4
+#define DW_IC_DATA_CMD 0x10
+#define DW_IC_SS_SCL_HCNT 0x14
+#define DW_IC_SS_SCL_LCNT 0x18
+#define DW_IC_FS_SCL_HCNT 0x1c
+#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_INTR_STAT 0x2c
+#define DW_IC_INTR_MASK 0x30
+#define DW_IC_CLR_INTR 0x40
+#define DW_IC_ENABLE 0x6c
+#define DW_IC_STATUS 0x70
+#define DW_IC_TXFLR 0x74
+#define DW_IC_RXFLR 0x78
+#define DW_IC_COMP_PARAM_1 0xf4
+#define DW_IC_TX_ABRT_SOURCE 0x80
+
+#define DW_IC_CON_MASTER 0x1
+#define DW_IC_CON_SPEED_STD 0x2
+#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_10BITADDR_MASTER 0x10
+#define DW_IC_CON_RESTART_EN 0x20
+#define DW_IC_CON_SLAVE_DISABLE 0x40
+
+#define DW_IC_INTR_TX_EMPTY 0x10
+#define DW_IC_INTR_TX_ABRT 0x40
+#define DW_IC_INTR_STOP_DET 0x200
+
+#define DW_IC_STATUS_ACTIVITY 0x1
+
+#define DW_IC_ERR_TX_ABRT 0x1
+
+/*
+ * status codes
+ */
+#define STATUS_IDLE 0x0
+#define STATUS_WRITE_IN_PROGRESS 0x1
+#define STATUS_READ_IN_PROGRESS 0x2
+
+#define TIMEOUT 20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * only expected abort codes are listed here
+ * refer to the datasheet for the full list
+ */
+#define ABRT_7B_ADDR_NOACK 0
+#define ABRT_10ADDR1_NOACK 1
+#define ABRT_10ADDR2_NOACK 2
+#define ABRT_TXDATA_NOACK 3
+#define ABRT_GCALL_NOACK 4
+#define ABRT_GCALL_READ 5
+#define ABRT_SBYTE_ACKDET 7
+#define ABRT_SBYTE_NORSTRT 9
+#define ABRT_10B_RD_NORSTRT 10
+#define ARB_MASTER_DIS 11
+#define ARB_LOST 12
+
+static char *abort_sources[] = {
+ [ABRT_7B_ADDR_NOACK] =
+ "slave address not acknowledged (7bit mode)",
+ [ABRT_10ADDR1_NOACK] =
+ "first address byte not acknowledged (10bit mode)",
+ [ABRT_10ADDR2_NOACK] =
+ "second address byte not acknowledged (10bit mode)",
+ [ABRT_TXDATA_NOACK] =
+ "data not acknowledged",
+ [ABRT_GCALL_NOACK] =
+ "no acknowledgement for a general call",
+ [ABRT_GCALL_READ] =
+ "read after general call",
+ [ABRT_SBYTE_ACKDET] =
+ "start byte acknowledged",
+ [ABRT_SBYTE_NORSTRT] =
+ "trying to send start byte when restart is disabled",
+ [ABRT_10B_RD_NORSTRT] =
+ "trying to read when restart is disabled (10bit mode)",
+ [ARB_MASTER_DIS] =
+ "trying to use disabled adapter",
+ [ARB_LOST] =
+ "lost arbitration",
+};
+
+/**
+ * struct dw_i2c_dev - private i2c-designware data
+ * @dev: driver model device node
+ * @base: IO registers pointer
+ * @cmd_complete: tx completion indicator
+ * @pump_msg: continue in progress transfers
+ * @lock: protect this struct and IO registers
+ * @clk: input reference clock
+ * @cmd_err: run time hardware error code
+ * @msgs: points to an array of messages currently being transferred
+ * @msgs_num: the number of elements in msgs
+ * @msg_write_idx: the element index of the current tx message in the msgs
+ * array
+ * @tx_buf_len: the length of the current tx buffer
+ * @tx_buf: the current tx buffer
+ * @msg_read_idx: the element index of the current rx message in the msgs
+ * array
+ * @rx_buf_len: the length of the current rx buffer
+ * @rx_buf: the current rx buffer
+ * @msg_err: error status of the current transfer
+ * @status: i2c master status, one of STATUS_*
+ * @abort_source: copy of the TX_ABRT_SOURCE register
+ * @irq: interrupt number for the i2c master
+ * @adapter: i2c subsystem adapter node
+ * @tx_fifo_depth: depth of the hardware tx fifo
+ * @rx_fifo_depth: depth of the hardware rx fifo
+ */
+struct dw_i2c_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct tasklet_struct pump_msg;
+ struct mutex lock;
+ struct clk *clk;
+ int cmd_err;
+ struct i2c_msg *msgs;
+ int msgs_num;
+ int msg_write_idx;
+ u16 tx_buf_len;
+ u8 *tx_buf;
+ int msg_read_idx;
+ u16 rx_buf_len;
+ u8 *rx_buf;
+ int msg_err;
+ unsigned int status;
+ u16 abort_source;
+ int irq;
+ struct i2c_adapter adapter;
+ unsigned int tx_fifo_depth;
+ unsigned int rx_fifo_depth;
+};
+
+/**
+ * i2c_dw_init() - initialize the designware i2c master hardware
+ * @dev: device private data
+ *
+ * This function configures and enables the I2C master.
+ * It is called from the I2C init path and again after a timeout at
+ * run time.
+ */
+static void i2c_dw_init(struct dw_i2c_dev *dev)
+{
+ u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
+ u16 ic_con;
+
+ /* Disable the adapter */
+ writeb(0, dev->base + DW_IC_ENABLE);
+
+ /* set standard and fast speed dividers for high/low periods */
+ writew((input_clock_khz * 40 / 10000)+1, /* std speed high, 4us */
+ dev->base + DW_IC_SS_SCL_HCNT);
+ writew((input_clock_khz * 47 / 10000)+1, /* std speed low, 4.7us */
+ dev->base + DW_IC_SS_SCL_LCNT);
+ writew((input_clock_khz * 6 / 10000)+1, /* fast speed high, 0.6us */
+ dev->base + DW_IC_FS_SCL_HCNT);
+ writew((input_clock_khz * 13 / 10000)+1, /* fast speed low, 1.3us */
+ dev->base + DW_IC_FS_SCL_LCNT);
+
+ /* configure the i2c master */
+ ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
+ writew(ic_con, dev->base + DW_IC_CON);
+}
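
The SCL count setup in i2c_dw_init() converts the input clock (in kHz) and the target high/low phase times (4.0/4.7 us standard, 0.6/1.3 us fast) into register counts with integer math and a +1 round-up. A stand-alone sketch of that calculation for an assumed 33 MHz reference clock; the clock value is only an example.

/* Sketch of the DW_IC_*_SCL_*CNT arithmetic for a 33 MHz input clock.
 * Phase times match the comments above; the clock value is an example. */
#include <stdio.h>

int main(void)
{
	unsigned int clk_khz = 33000;     /* example input reference clock */

	unsigned int ss_hcnt = clk_khz * 40 / 10000 + 1;  /* std high, 4.0us */
	unsigned int ss_lcnt = clk_khz * 47 / 10000 + 1;  /* std low,  4.7us */
	unsigned int fs_hcnt = clk_khz *  6 / 10000 + 1;  /* fast high, 0.6us */
	unsigned int fs_lcnt = clk_khz * 13 / 10000 + 1;  /* fast low,  1.3us */

	printf("SS_SCL_HCNT=%u SS_SCL_LCNT=%u\n", ss_hcnt, ss_lcnt);
	printf("FS_SCL_HCNT=%u FS_SCL_LCNT=%u\n", fs_hcnt, fs_lcnt);
	return 0;
}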
+
+/*
+ * Wait for the bus to become idle
+ */
+static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
+{
+ int timeout = TIMEOUT;
+
+ while (readb(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
+ if (timeout <= 0) {
+ dev_warn(dev->dev, "timeout waiting for bus ready\n");
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+/*
+ * Initiate low level master read/write transaction.
+ * This function is called from i2c_dw_xfer when starting a transfer.
+ * This function is also called from dw_i2c_pump_msg to continue a transfer
+ * that is longer than the size of the TX FIFO.
+ */
+static void
+i2c_dw_xfer_msg(struct i2c_adapter *adap)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msgs = dev->msgs;
+ int num = dev->msgs_num;
+ u16 ic_con, intr_mask;
+ int tx_limit = dev->tx_fifo_depth - readb(dev->base + DW_IC_TXFLR);
+ int rx_limit = dev->rx_fifo_depth - readb(dev->base + DW_IC_RXFLR);
+ u16 addr = msgs[dev->msg_write_idx].addr;
+ u16 buf_len = dev->tx_buf_len;
+
+ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+ /* Disable the adapter */
+ writeb(0, dev->base + DW_IC_ENABLE);
+
+ /* set the slave (target) address */
+ writew(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR);
+
+ /* if the slave address is a 10-bit address, enable 10BITADDR */
+ ic_con = readw(dev->base + DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ else
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ writew(ic_con, dev->base + DW_IC_CON);
+
+ /* Enable the adapter */
+ writeb(1, dev->base + DW_IC_ENABLE);
+ }
+
+ for (; dev->msg_write_idx < num; dev->msg_write_idx++) {
+ /* if target address has changed, we need to
+ * reprogram the target address in the i2c
+ * adapter when we are done with this transfer
+ */
+ if (msgs[dev->msg_write_idx].addr != addr)
+ return;
+
+ if (msgs[dev->msg_write_idx].len == 0) {
+ dev_err(dev->dev,
+ "%s: invalid message length\n", __func__);
+ dev->msg_err = -EINVAL;
+ return;
+ }
+
+ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+ /* new i2c_msg */
+ dev->tx_buf = msgs[dev->msg_write_idx].buf;
+ buf_len = msgs[dev->msg_write_idx].len;
+ }
+
+ while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
+ if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+ writew(0x100, dev->base + DW_IC_DATA_CMD);
+ rx_limit--;
+ } else
+ writew(*(dev->tx_buf++),
+ dev->base + DW_IC_DATA_CMD);
+ tx_limit--; buf_len--;
+ }
+ }
+
+ intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
+ if (buf_len > 0) { /* more bytes to be written */
+ intr_mask |= DW_IC_INTR_TX_EMPTY;
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ } else
+ dev->status &= ~STATUS_WRITE_IN_PROGRESS;
+ writew(intr_mask, dev->base + DW_IC_INTR_MASK);
+
+ dev->tx_buf_len = buf_len;
+}
+
+static void
+i2c_dw_read(struct i2c_adapter *adap)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msgs = dev->msgs;
+ int num = dev->msgs_num;
+ u16 addr = msgs[dev->msg_read_idx].addr;
+ int rx_valid = readw(dev->base + DW_IC_RXFLR);
+
+ for (; dev->msg_read_idx < num; dev->msg_read_idx++) {
+ u16 len;
+ u8 *buf;
+
+ if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
+ continue;
+
+ /* different i2c client, reprogram the i2c adapter */
+ if (msgs[dev->msg_read_idx].addr != addr)
+ return;
+
+ if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
+ len = msgs[dev->msg_read_idx].len;
+ buf = msgs[dev->msg_read_idx].buf;
+ } else {
+ len = dev->rx_buf_len;
+ buf = dev->rx_buf;
+ }
+
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+ *buf++ = readb(dev->base + DW_IC_DATA_CMD);
+
+ if (len > 0) {
+ dev->status |= STATUS_READ_IN_PROGRESS;
+ dev->rx_buf_len = len;
+ dev->rx_buf = buf;
+ return;
+ } else
+ dev->status &= ~STATUS_READ_IN_PROGRESS;
+ }
+}
+
+/*
+ * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ */
+static int
+i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ int ret;
+
+ dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
+
+ mutex_lock(&dev->lock);
+
+ INIT_COMPLETION(dev->cmd_complete);
+ dev->msgs = msgs;
+ dev->msgs_num = num;
+ dev->cmd_err = 0;
+ dev->msg_write_idx = 0;
+ dev->msg_read_idx = 0;
+ dev->msg_err = 0;
+ dev->status = STATUS_IDLE;
+
+ ret = i2c_dw_wait_bus_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* start the transfers */
+ i2c_dw_xfer_msg(adap);
+
+ /* wait for tx to complete */
+ ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
+ if (ret == 0) {
+ dev_err(dev->dev, "controller timed out\n");
+ i2c_dw_init(dev);
+ ret = -ETIMEDOUT;
+ goto done;
+ } else if (ret < 0)
+ goto done;
+
+ if (dev->msg_err) {
+ ret = dev->msg_err;
+ goto done;
+ }
+
+ /* no error */
+ if (likely(!dev->cmd_err)) {
+ /* read rx fifo, and disable the adapter */
+ do {
+ i2c_dw_read(adap);
+ } while (dev->status & STATUS_READ_IN_PROGRESS);
+ writeb(0, dev->base + DW_IC_ENABLE);
+ ret = num;
+ goto done;
+ }
+
+ /* We have an error */
+ if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
+ unsigned long abort_source = dev->abort_source;
+ int i;
+
+ for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) {
+ dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+ }
+ }
+ ret = -EIO;
+
+done:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
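
For context, a client driver never calls i2c_dw_xfer() directly; it goes through i2c_transfer(), which dispatches to the adapter's master_xfer hook. A hedged sketch of a combined write-then-read transfer against this adapter; the 0x50 slave address and one-byte register layout are invented for the example, and the snippet assumes <linux/i2c.h>:

static int example_read_reg(struct i2c_adapter *adap, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		/* write the register offset, then read one byte back */
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret;

	ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}
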
+
+static u32 i2c_dw_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static void dw_i2c_pump_msg(unsigned long data)
+{
+ struct dw_i2c_dev *dev = (struct dw_i2c_dev *) data;
+ u16 intr_mask;
+
+ i2c_dw_read(&dev->adapter);
+ i2c_dw_xfer_msg(&dev->adapter);
+
+ intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
+ if (dev->status & STATUS_WRITE_IN_PROGRESS)
+ intr_mask |= DW_IC_INTR_TX_EMPTY;
+ writew(intr_mask, dev->base + DW_IC_INTR_MASK);
+}
+
+/*
+ * Interrupt service routine. This gets called whenever an I2C interrupt
+ * occurs.
+ */
+static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
+{
+ struct dw_i2c_dev *dev = dev_id;
+ u16 stat;
+
+ stat = readw(dev->base + DW_IC_INTR_STAT);
+ dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
+ if (stat & DW_IC_INTR_TX_ABRT) {
+ dev->abort_source = readw(dev->base + DW_IC_TX_ABRT_SOURCE);
+ dev->cmd_err |= DW_IC_ERR_TX_ABRT;
+ dev->status = STATUS_IDLE;
+ } else if (stat & DW_IC_INTR_TX_EMPTY)
+ tasklet_schedule(&dev->pump_msg);
+
+ readb(dev->base + DW_IC_CLR_INTR); /* clear interrupts */
+ writew(0, dev->base + DW_IC_INTR_MASK); /* disable interrupts */
+ if (stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+ complete(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+static struct i2c_algorithm i2c_dw_algo = {
+ .master_xfer = i2c_dw_xfer,
+ .functionality = i2c_dw_func,
+};
+
+static int __devinit dw_i2c_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ struct resource *mem, *irq, *ioarea;
+ int r;
+
+ /* NOTE: driver uses the static register mapping */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -EINVAL;
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev) {
+ r = -ENOMEM;
+ goto err_release_region;
+ }
+
+ init_completion(&dev->cmd_complete);
+ tasklet_init(&dev->pump_msg, dw_i2c_pump_msg, (unsigned long) dev);
+ mutex_init(&dev->lock);
+ dev->dev = get_device(&pdev->dev);
+ dev->irq = irq->start;
+ platform_set_drvdata(pdev, dev);
+
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ r = -ENODEV;
+ goto err_free_mem;
+ }
+ clk_enable(dev->clk);
+
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (dev->base == NULL) {
+ dev_err(&pdev->dev, "failure mapping io resources\n");
+ r = -EBUSY;
+ goto err_unuse_clocks;
+ }
+ {
+ u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1);
+
+ dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
+ dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
+ }
+ i2c_dw_init(dev);
+
+ writew(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */
+ r = request_irq(dev->irq, i2c_dw_isr, 0, pdev->name, dev);
+ if (r) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
+ goto err_iounmap;
+ }
+
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
+ sizeof(adap->name));
+ adap->algo = &i2c_dw_algo;
+ adap->dev.parent = &pdev->dev;
+
+ adap->nr = pdev->id;
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(&pdev->dev, "failure adding adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_iounmap:
+ iounmap(dev->base);
+err_unuse_clocks:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+err_free_mem:
+ platform_set_drvdata(pdev, NULL);
+ put_device(&pdev->dev);
+ kfree(dev);
+err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return r;
+}
+
+static int __devexit dw_i2c_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ platform_set_drvdata(pdev, NULL);
+ i2c_del_adapter(&dev->adapter);
+ put_device(&pdev->dev);
+
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+
+ writeb(0, dev->base + DW_IC_ENABLE);
+ free_irq(dev->irq, dev);
+ kfree(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:i2c_designware");
+
+static struct platform_driver dw_i2c_driver = {
+ .remove = __devexit_p(dw_i2c_remove),
+ .driver = {
+ .name = "i2c_designware",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init dw_i2c_init_driver(void)
+{
+ return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
+MODULE_LICENSE("GPL");
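
Since the driver binds by platform device name, a board file has to provide the memory, IRQ and clock resources that the probe routine looks up. A rough sketch under assumed values (the 0xffee0000 base address and IRQ 12 are placeholders, and the board must also register a clock that clk_get(&pdev->dev, NULL) can find):

static struct resource example_dw_i2c_resources[] = {
	{
		.start	= 0xffee0000,			/* hypothetical register base */
		.end	= 0xffee0000 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 12,				/* hypothetical IRQ line */
		.end	= 12,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_dw_i2c_device = {
	.name		= "i2c_designware",
	.id		= 0,				/* becomes adap->nr in probe */
	.resource	= example_dw_i2c_resources,
	.num_resources	= ARRAY_SIZE(example_dw_i2c_resources),
};

/* registered from board init code: platform_device_register(&example_dw_i2c_device); */
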
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index e4476743f203..b1bc6e277d2a 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -85,10 +85,11 @@ static void dump_iic_regs(const char* header, struct ibm_iic_private* dev)
{
volatile struct iic_regs __iomem *iic = dev->vaddr;
printk(KERN_DEBUG "ibm-iic%d: %s\n", dev->idx, header);
- printk(KERN_DEBUG " cntl = 0x%02x, mdcntl = 0x%02x\n"
- KERN_DEBUG " sts = 0x%02x, extsts = 0x%02x\n"
- KERN_DEBUG " clkdiv = 0x%02x, xfrcnt = 0x%02x\n"
- KERN_DEBUG " xtcntlss = 0x%02x, directcntl = 0x%02x\n",
+ printk(KERN_DEBUG
+ " cntl = 0x%02x, mdcntl = 0x%02x\n"
+ " sts = 0x%02x, extsts = 0x%02x\n"
+ " clkdiv = 0x%02x, xfrcnt = 0x%02x\n"
+ " xtcntlss = 0x%02x, directcntl = 0x%02x\n",
in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts),
in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt),
in_8(&iic->xtcntlss), in_8(&iic->directcntl));
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b606db85525d..ad8d2010c921 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -339,7 +339,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* to get longer filter period for better noise suppression.
* The filter is iclk (fclk for HS) period.
*/
- if (dev->speed > 400 || cpu_is_omap_2430())
+ if (dev->speed > 400 || cpu_is_omap2430())
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 03c86209446f..680e5975217f 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -389,8 +389,7 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
.init_chipset = init_chipset_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_ABUSE_PREFETCH,
+ .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index bd066bb9d611..09f98ed0731f 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -135,6 +135,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
hw[0].irq = 14;
+ hw[1].irq = 15;
return ide_host_add(d, hws, 2, NULL);
}
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 77f79d26b264..c509c9916464 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -92,6 +92,11 @@ int ide_acpi_init(void)
return 0;
}
+bool ide_port_acpi(ide_hwif_t *hwif)
+{
+ return ide_noacpi == 0 && hwif->acpidata;
+}
+
/**
* ide_get_dev_handle - finds acpi_handle and PCI device.function
* @dev: device to locate
@@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive)
unsigned long gtf_address;
unsigned long obj_loc;
- if (ide_noacpi)
- return 0;
-
DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
@@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
struct acpi_buffer output;
union acpi_object *out_obj;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Setting up output buffer for _GTM */
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
@@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
struct ide_acpi_drive_link *master = &hwif->acpidata->master;
struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Give the GTM buffer + drive Identify data to the channel via the
* _STM method: */
/* setup input parameters buffer for _STM */
@@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
ide_drive_t *drive;
int i;
- if (ide_noacpi || ide_noacpi_psx)
+ if (ide_noacpi_psx)
return;
DEBPRINT("ENTER:\n");
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* channel first and then drives for power on and verse versa for power off */
if (on)
acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
@@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
drive->name, err);
}
- if (!ide_acpionboot) {
+ if (ide_noacpi || ide_acpionboot == 0) {
DEBPRINT("ACPI methods disabled on boot\n");
return;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 4a19686fcfe9..6a9a769bffc1 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
} else if (!blk_pc_request(rq)) {
ide_cd_request_sense_fixup(drive, cmd);
- /* complain if we still have data left to transfer */
+
uptodate = cmd->nleft ? 0 : 1;
- if (uptodate == 0)
+
+ /*
+ * suck out the remaining bytes from the drive in an
+ * attempt to complete the data xfer. (see BZ#13399)
+ */
+ if (!(stat & ATA_ERR) && !uptodate && thislen) {
+ ide_pio_bytes(drive, cmd, write, thislen);
+ uptodate = cmd->nleft ? 0 : 1;
+ }
+
+ if (!uptodate)
rq->cmd_flags |= REQ_FAILED;
}
goto out_end;
@@ -876,9 +886,12 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
return stat;
/*
- * Sanity check the given block size
+ * Sanity check the given block size, in so far as making
+ * sure the sectors_per_frame we give to the caller won't
+ * end up being bogus.
*/
blocklen = be32_to_cpu(capbuf.blocklen);
+ blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS;
switch (blocklen) {
case 512:
case 1024:
@@ -886,10 +899,9 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
case 4096:
break;
default:
- printk(KERN_ERR PFX "%s: weird block size %u\n",
+ printk_once(KERN_ERR PFX "%s: weird block size %u; "
+ "setting default block size to 2048\n",
drive->name, blocklen);
- printk(KERN_ERR PFX "%s: default to 2kb block size\n",
- drive->name);
blocklen = 2048;
break;
}
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 5bf958e5b1d5..1099bf7cf968 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
err = setfunc(drive, *(int *)&rq->cmd[1]);
if (err)
rq->errors = err;
- ide_complete_rq(drive, err, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 219e6fb78dc6..ee58c88dee5a 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -361,9 +361,6 @@ static int ide_tune_dma(ide_drive_t *drive)
if (__ide_dma_bad_drive(drive))
return 0;
- if (ide_id_dma_bug(drive))
- return 0;
-
if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
return config_drive_for_dma(drive);
@@ -394,24 +391,6 @@ static int ide_dma_check(ide_drive_t *drive)
return -1;
}
-int ide_id_dma_bug(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- if (id[ATA_ID_FIELD_VALID] & 4) {
- if ((id[ATA_ID_UDMA_MODES] >> 8) &&
- (id[ATA_ID_MWDMA_MODES] >> 8))
- goto err_out;
- } else if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
- (id[ATA_ID_SWDMA_MODES] >> 8))
- goto err_out;
-
- return 0;
-err_out:
- printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
- return 1;
-}
-
int ide_set_dma(ide_drive_t *drive)
{
int rc;
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 2b9141979613..e9abf2c3c335 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
}
}
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8b3f204f7d73..fefbdfc8db06 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -293,7 +293,7 @@ out_end:
drive->failed_pc = NULL;
if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1059f809b809..db96138fefcd 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
}
}
-/* obsolete, blk_rq_bytes() should be used instead */
-unsigned int ide_rq_bytes(struct request *rq)
-{
- if (blk_pc_request(rq))
- return blk_rq_bytes(rq);
- else
- return blk_rq_cur_sectors(rq) << 9;
-}
-EXPORT_SYMBOL_GPL(ide_rq_bytes);
-
int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
{
ide_hwif_t *hwif = drive->hwif;
@@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
if ((media == ide_floppy || media == ide_tape) && drv_req) {
rq->errors = 0;
- ide_complete_rq(drive, 0, blk_rq_bytes(rq));
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
else if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
}
+
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
}
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 82f252c3ee6e..e246d3d3fbcc 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
goto out;
}
- id = kmalloc(size, GFP_KERNEL);
+ /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
+ id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
if (id == NULL) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index fa047150a1c6..2892b242bbe1 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -210,6 +210,7 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
*/
static const struct drive_list_entry ivb_list[] = {
{ "QUANTUM FIREBALLlct10 05" , "A03.0900" },
+ { "QUANTUM FIREBALLlct20 30" , "APL.0900" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB00" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB01" },
{ "TSSTcorp CDDVDW SH-S202N" , "SB00" },
@@ -329,9 +330,6 @@ int ide_driveid_update(ide_drive_t *drive)
kfree(id);
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
- ide_dma_off(drive);
-
return 1;
out_err:
if (rc == 2)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index c14ca144cffe..ad7be2669dcb 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
struct request_pm_state rqpm;
int ret;
- /* call ACPI _GTM only once */
- if ((drive->dn & 1) == 0 || pair == NULL)
- ide_acpi_get_timing(hwif);
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _GTM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL)
+ ide_acpi_get_timing(hwif);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
@@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
ret = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
- /* call ACPI _PS3 only after both devices are suspended */
- if (ret == 0 && ((drive->dn & 1) || pair == NULL))
- ide_acpi_set_state(hwif, 0);
+ if (ret == 0 && ide_port_acpi(hwif)) {
+ /* call ACPI _PS3 only after both devices are suspended */
+ if ((drive->dn & 1) || pair == NULL)
+ ide_acpi_set_state(hwif, 0);
+ }
return ret;
}
@@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev)
struct request_pm_state rqpm;
int err;
- /* call ACPI _PS0 / _STM only once */
- if ((drive->dn & 1) == 0 || pair == NULL) {
- ide_acpi_set_state(hwif, 1);
- ide_acpi_push_timing(hwif);
- }
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _PS0 / _STM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL) {
+ ide_acpi_set_state(hwif, 1);
+ ide_acpi_push_timing(hwif);
+ }
- ide_acpi_exec_tfs(drive);
+ ide_acpi_exec_tfs(drive);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 51af4eea0d36..1bb106f6221a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -818,6 +818,24 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
return j;
}
+static void ide_host_enable_irqs(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ /* clear any pending IRQs */
+ hwif->tp_ops->read_status(hwif);
+
+ /* unmask IRQs */
+ if (hwif->io_ports.ctl_addr)
+ hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
+ }
+}
+
/*
* This routine sets up the IRQ for an IDE interface.
*/
@@ -831,9 +849,6 @@ static int init_irq (ide_hwif_t *hwif)
if (irq_handler == NULL)
irq_handler = ide_intr;
- if (io_ports->ctl_addr)
- hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
goto out_up;
@@ -1404,6 +1419,8 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
ide_port_tune_devices(hwif);
}
+ ide_host_enable_irqs(host);
+
ide_host_for_each_port(i, hwif, host) {
if (hwif == NULL)
continue;
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 95f45f9b8e5e..f102fcc7e52a 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support"
source "drivers/firewire/Kconfig"
config IEEE1394
- tristate "Stable FireWire stack"
+ tristate "Legacy alternative FireWire driver stack"
depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also
@@ -33,11 +33,9 @@ config IEEE1394_OHCI1394
module will be called ohci1394.
NOTE:
-
- You should only build either ohci1394 or the new firewire-ohci driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -46,12 +44,7 @@ config IEEE1394_OHCI1394
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
comment "PCILynx controller requires I2C"
depends on IEEE1394 && I2C=n
@@ -105,7 +98,7 @@ config IEEE1394_ETH1394_ROM_ENTRY
default n
config IEEE1394_ETH1394
- tristate "IP over 1394"
+ tristate "IP networking over 1394 (experimental)"
depends on IEEE1394 && EXPERIMENTAL && INET
select IEEE1394_ETH1394_ROM_ENTRY
help
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 83b734aec923..52b25f8b111d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -880,6 +880,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
}
shost->hostdata[0] = (unsigned long)lu;
+ shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
if (!scsi_add_host(shost, &ud->device)) {
lu->shost = shost;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index c5036f1cc5b0..64a3a66a8a39 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -25,6 +25,12 @@
#define SBP2_DEVICE_NAME "sbp2"
/*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE 16
+
+/*
* SBP-2 specific definitions
*/
@@ -51,7 +57,7 @@ struct sbp2_command_orb {
u32 data_descriptor_hi;
u32 data_descriptor_lo;
u32 misc;
- u8 cdb[12];
+ u8 cdb[SBP2_MAX_CDB_SIZE];
} __attribute__((packed));
#define SBP2_LOGIN_REQUEST 0x0
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index ce511d8748ce..5be1bd4fc7ed 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -514,7 +514,7 @@ static struct notifier_block nb = {
.notifier_call = netevent_callback
};
-static int addr_init(void)
+static int __init addr_init(void)
{
addr_wq = create_singlethread_workqueue("ib_addr");
if (!addr_wq)
@@ -524,7 +524,7 @@ static int addr_init(void)
return 0;
}
-static void addr_cleanup(void)
+static void __exit addr_cleanup(void)
{
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 851de83ff455..075317884b53 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2960,7 +2960,7 @@ static void cma_remove_one(struct ib_device *device)
kfree(cma_dev);
}
-static int cma_init(void)
+static int __init cma_init(void)
{
int ret, low, high, remaining;
@@ -2990,7 +2990,7 @@ err:
return ret;
}
-static void cma_cleanup(void)
+static void __exit cma_cleanup(void)
{
ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb);
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 9209c5332dfe..8b92f85d4dd0 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -319,7 +319,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
ib_device);
struct hipz_query_port *rblock;
- if (index > 255) {
+ if (index < 0 || index > 255) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index ce4e6eff4792..fab18a2c74a8 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"
-#define HCAD_VERSION "0027"
+#define HCAD_VERSION "0028"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -506,6 +506,7 @@ static int ehca_init_device(struct ehca_shca *shca)
shca->ib_device.detach_mcast = ehca_detach_mcast;
shca->ib_device.process_mad = ehca_process_mad;
shca->ib_device.mmap = ehca_mmap;
+ shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
shca->ib_device.uverbs_cmd_mask |=
@@ -1028,17 +1029,23 @@ static int __init ehca_module_init(void)
goto module_init1;
}
+ ret = ehca_create_busmap();
+ if (ret) {
+ ehca_gen_err("Cannot create busmap.");
+ goto module_init2;
+ }
+
ret = ibmebus_register_driver(&ehca_driver);
if (ret) {
ehca_gen_err("Cannot register eHCA device driver");
ret = -EINVAL;
- goto module_init2;
+ goto module_init3;
}
ret = register_memory_notifier(&ehca_mem_nb);
if (ret) {
ehca_gen_err("Failed registering memory add/remove notifier");
- goto module_init3;
+ goto module_init4;
}
if (ehca_poll_all_eqs != 1) {
@@ -1053,9 +1060,12 @@ static int __init ehca_module_init(void)
return 0;
-module_init3:
+module_init4:
ibmebus_unregister_driver(&ehca_driver);
+module_init3:
+ ehca_destroy_busmap();
+
module_init2:
ehca_destroy_slab_caches();
@@ -1073,6 +1083,8 @@ static void __exit ehca_module_exit(void)
unregister_memory_notifier(&ehca_mem_nb);
+ ehca_destroy_busmap();
+
ehca_destroy_slab_caches();
ehca_destroy_comp_pool();
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 72f83f7df614..7663a2a9f130 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -53,6 +53,38 @@
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512
+/* DMEM toleration management */
+#define EHCA_SECTSHIFT SECTION_SIZE_BITS
+#define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT)
+#define EHCA_HUGEPAGESHIFT 34
+#define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT)
+#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
+#define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
+#define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
+#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
+#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
+#define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */
+#define EHCA_DIR_MAP_SIZE (0x10000)
+#define EHCA_ENT_MAP_SIZE (0x10000)
+#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
+
+static unsigned long ehca_mr_len;
+
+/*
+ * Memory map data structures
+ */
+struct ehca_dir_bmap {
+ u64 ent[EHCA_MAP_ENTRIES];
+};
+struct ehca_top_bmap {
+ struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
+};
+struct ehca_bmap {
+ struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
+};
+
+static struct ehca_bmap *ehca_bmap;
+
static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
@@ -68,6 +100,8 @@ enum ehca_mr_pgsize {
#define EHCA_MR_PGSHIFT1M 20
#define EHCA_MR_PGSHIFT16M 24
+static u64 ehca_map_vaddr(void *caddr);
+
static u32 ehca_encode_hwpage_size(u32 pgsize)
{
int log = ilog2(pgsize);
@@ -135,7 +169,8 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
goto get_dma_mr_exit0;
}
- ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
+ ret = ehca_reg_maxmr(shca, e_maxmr,
+ (void *)ehca_map_vaddr((void *)KERNELBASE),
mr_access_flags, e_pd,
&e_maxmr->ib.ib_mr.lkey,
&e_maxmr->ib.ib_mr.rkey);
@@ -251,7 +286,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
+ &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
if (ret) {
ib_mr = ERR_PTR(ret);
goto reg_phys_mr_exit1;
@@ -370,7 +405,7 @@ reg_user_mr_fallback:
ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
+ &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
ehca_warn(pd->device, "failed to register mr "
"with hwpage_size=%llx", hwpage_size);
@@ -794,7 +829,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
ret = ehca_reg_mr(shca, e_fmr, NULL,
fmr_attr->max_pages * (1 << fmr_attr->page_shift),
mr_access_flags, e_pd, &pginfo,
- &tmp_lkey, &tmp_rkey);
+ &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
if (ret) {
ib_fmr = ERR_PTR(ret);
goto alloc_fmr_exit1;
@@ -983,6 +1018,10 @@ free_fmr_exit0:
/*----------------------------------------------------------------------*/
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo);
+
int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_mr *e_mr,
u64 *iova_start,
@@ -991,7 +1030,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_pd *e_pd,
struct ehca_mr_pginfo *pginfo,
u32 *lkey, /*OUT*/
- u32 *rkey) /*OUT*/
+ u32 *rkey, /*OUT*/
+ enum ehca_reg_type reg_type)
{
int ret;
u64 h_ret;
@@ -1015,7 +1055,13 @@ int ehca_reg_mr(struct ehca_shca *shca,
e_mr->ipz_mr_handle = hipzout.handle;
- ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+ if (reg_type == EHCA_REG_BUSMAP_MR)
+ ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
+ else if (reg_type == EHCA_REG_MR)
+ ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+ else
+ ret = -EINVAL;
+
if (ret)
goto ehca_reg_mr_exit1;
@@ -1316,7 +1362,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
- e_pd, pginfo, lkey, rkey);
+ e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
if (ret) {
u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
memcpy(&e_mr->flags, &(save_mr.flags),
@@ -1409,7 +1455,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
ret = ehca_reg_mr(shca, e_fmr, NULL,
(e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
- &tmp_rkey);
+ &tmp_rkey, EHCA_REG_MR);
if (ret) {
u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
memcpy(&e_fmr->flags, &(save_mr.flags),
@@ -1478,6 +1524,90 @@ ehca_reg_smr_exit0:
} /* end ehca_reg_smr() */
/*----------------------------------------------------------------------*/
+static inline void *ehca_calc_sectbase(int top, int dir, int idx)
+{
+ unsigned long ret = idx;
+ ret |= dir << EHCA_DIR_INDEX_SHIFT;
+ ret |= top << EHCA_TOP_INDEX_SHIFT;
+ return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+#define ehca_bmap_valid(entry) \
+ ((u64)entry != (u64)EHCA_INVAL_ADDR)
+
+static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
+ struct ehca_shca *shca, struct ehca_mr *mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ u64 h_ret = 0;
+ unsigned long page = 0;
+ u64 rpage = virt_to_abs(kpage);
+ int page_count;
+
+ void *sectbase = ehca_calc_sectbase(top, dir, idx);
+ if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
+ ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
+ "hwpage_size does not fit to "
+ "section start address");
+ }
+ page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
+
+ while (page < page_count) {
+ u64 rnum;
+ for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
+ rnum++) {
+ void *pg = sectbase + ((page++) * pginfo->hwpage_size);
+ kpage[rnum] = virt_to_abs(pg);
+ }
+
+ h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
+ ehca_encode_hwpage_size(pginfo->hwpage_size),
+ 0, rpage, rnum);
+
+ if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
+ ehca_err(&shca->ib_device, "register_rpage_mr failed");
+ return h_ret;
+ }
+ }
+ return h_ret;
+}
+
+static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
+ struct ehca_shca *shca, struct ehca_mr *mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ u64 hret = H_SUCCESS;
+ int idx;
+
+ for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
+ continue;
+
+ hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
+ pginfo);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
+}
+
+static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
+ struct ehca_mr *mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ u64 hret = H_SUCCESS;
+ int dir;
+
+ for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+ continue;
+
+ hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
+}
/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
@@ -1495,6 +1625,11 @@ int ehca_reg_internal_maxmr(
u32 num_hwpages;
u64 hw_pgsize;
+ if (!ehca_bmap) {
+ ret = -EFAULT;
+ goto ehca_reg_internal_maxmr_exit0;
+ }
+
e_mr = ehca_mr_new();
if (!e_mr) {
ehca_err(&shca->ib_device, "out of memory");
@@ -1504,8 +1639,8 @@ int ehca_reg_internal_maxmr(
e_mr->flags |= EHCA_MR_FLAG_MAXMR;
/* register internal max-MR on HCA */
- size_maxmr = (u64)high_memory - PAGE_OFFSET;
- iova_start = (u64 *)KERNELBASE;
+ size_maxmr = ehca_mr_len;
+ iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
ib_pbuf.addr = 0;
ib_pbuf.size = size_maxmr;
num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
@@ -1524,7 +1659,7 @@ int ehca_reg_internal_maxmr(
ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
&pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
+ &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
if (ret) {
ehca_err(&shca->ib_device, "reg of internal max MR failed, "
"e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
@@ -2077,8 +2212,8 @@ int ehca_mr_is_maxmr(u64 size,
u64 *iova_start)
{
/* a MR is treated as max-MR only if it fits following: */
- if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
- (iova_start == (void *)KERNELBASE)) {
+ if ((size == ehca_mr_len) &&
+ (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
ehca_gen_dbg("this is a max-MR");
return 1;
} else
@@ -2184,3 +2319,350 @@ void ehca_cleanup_mrmw_cache(void)
if (mw_cache)
kmem_cache_destroy(mw_cache);
}
+
+static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
+ int dir)
+{
+ if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
+ ehca_top_bmap->dir[dir] =
+ kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
+ if (!ehca_top_bmap->dir[dir])
+ return -ENOMEM;
+ /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+ memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
+ }
+ return 0;
+}
+
+static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
+{
+ if (!ehca_bmap_valid(ehca_bmap->top[top])) {
+ ehca_bmap->top[top] =
+ kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
+ if (!ehca_bmap->top[top])
+ return -ENOMEM;
+ /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+ memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
+ }
+ return ehca_init_top_bmap(ehca_bmap->top[top], dir);
+}
+
+static inline int ehca_calc_index(unsigned long i, unsigned long s)
+{
+ return (i >> s) & EHCA_INDEX_MASK;
+}
+
+void ehca_destroy_busmap(void)
+{
+ int top, dir;
+
+ if (!ehca_bmap)
+ return;
+
+ for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]))
+ continue;
+ for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+ continue;
+
+ kfree(ehca_bmap->top[top]->dir[dir]);
+ }
+
+ kfree(ehca_bmap->top[top]);
+ }
+
+ kfree(ehca_bmap);
+ ehca_bmap = NULL;
+}
+
+static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
+{
+ unsigned long i, start_section, end_section;
+ int top, dir, idx;
+
+ if (!nr_pages)
+ return 0;
+
+ if (!ehca_bmap) {
+ ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
+ if (!ehca_bmap)
+ return -ENOMEM;
+ /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+ memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
+ }
+
+ start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
+ end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
+ for (i = start_section; i < end_section; i++) {
+ int ret;
+ top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
+ dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
+ idx = i & EHCA_INDEX_MASK;
+
+ ret = ehca_init_bmap(ehca_bmap, top, dir);
+ if (ret) {
+ ehca_destroy_busmap();
+ return ret;
+ }
+ ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
+ ehca_mr_len += EHCA_SECTSIZE;
+ }
+ return 0;
+}
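
Each registered memory section is addressed through three 13-bit indices (top, dir, idx), so every bmap level holds 8192 entries. A small standalone sketch of the same index split for an arbitrary section number, using the shift values defined above:

#include <stdio.h>

int main(void)
{
	unsigned long i = 0x12345678UL;		/* arbitrary section number */
	unsigned long mask = (1UL << 13) - 1;	/* EHCA_INDEX_MASK */

	printf("top = %lu\n", (i >> 26) & mask);	/* EHCA_TOP_INDEX_SHIFT */
	printf("dir = %lu\n", (i >> 13) & mask);	/* EHCA_DIR_INDEX_SHIFT */
	printf("idx = %lu\n", i & mask);
	return 0;
}
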
+
+static int ehca_is_hugepage(unsigned long pfn)
+{
+ int page_order;
+
+ if (pfn & EHCA_HUGEPAGE_PFN_MASK)
+ return 0;
+
+ page_order = compound_order(pfn_to_page(pfn));
+ if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
+ return 0;
+
+ return 1;
+}
+
+static int ehca_create_busmap_callback(unsigned long initial_pfn,
+ unsigned long total_nr_pages, void *arg)
+{
+ int ret;
+ unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+ if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
+ return ehca_update_busmap(initial_pfn, total_nr_pages);
+
+ /* Given chunk is >= 16GB -> check for hugepages */
+ start_pfn = initial_pfn;
+ end_pfn = initial_pfn + total_nr_pages;
+ pfn = start_pfn;
+
+ while (pfn < end_pfn) {
+ if (ehca_is_hugepage(pfn)) {
+ /* Add mem found in front of the hugepage */
+ nr_pages = pfn - start_pfn;
+ ret = ehca_update_busmap(start_pfn, nr_pages);
+ if (ret)
+ return ret;
+ /* Skip the hugepage */
+ pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
+ start_pfn = pfn;
+ } else
+ pfn += (EHCA_SECTSIZE / PAGE_SIZE);
+ }
+
+ /* Add mem found behind the hugepage(s) */
+ nr_pages = pfn - start_pfn;
+ return ehca_update_busmap(start_pfn, nr_pages);
+}
+
+int ehca_create_busmap(void)
+{
+ int ret;
+
+ ehca_mr_len = 0;
+ ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+ ehca_create_busmap_callback);
+ return ret;
+}
+
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ int top;
+ u64 hret, *kpage;
+
+ kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+ if (!kpage) {
+ ehca_err(&shca->ib_device, "kpage alloc failed");
+ return -ENOMEM;
+ }
+ for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]))
+ continue;
+ hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
+ if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+ break;
+ }
+
+ ehca_free_fw_ctrlblock(kpage);
+
+ if (hret == H_SUCCESS)
+ return 0; /* Everything is fine */
+ else {
+ ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
+ "h_ret=%lli e_mr=%p top=%x lkey=%x "
+ "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
+ e_mr->ib.ib_mr.lkey,
+ shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle);
+ return ehca2ib_return_code(hret);
+ }
+}
+
+static u64 ehca_map_vaddr(void *caddr)
+{
+ int top, dir, idx;
+ unsigned long abs_addr, offset;
+ u64 entry;
+
+ if (!ehca_bmap)
+ return EHCA_INVAL_ADDR;
+
+ abs_addr = virt_to_abs(caddr);
+ top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
+ if (!ehca_bmap_valid(ehca_bmap->top[top]))
+ return EHCA_INVAL_ADDR;
+
+ dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+ return EHCA_INVAL_ADDR;
+
+ idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
+
+ entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
+ if (ehca_bmap_valid(entry)) {
+ offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
+ return entry | offset;
+ } else
+ return EHCA_INVAL_ADDR;
+}
+
+static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == EHCA_INVAL_ADDR;
+}
+
+static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ if (cpu_addr)
+ return ehca_map_vaddr(cpu_addr);
+ else
+ return EHCA_INVAL_ADDR;
+}
+
+static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ if (offset + size > PAGE_SIZE)
+ return EHCA_INVAL_ADDR;
+
+ addr = ehca_map_vaddr(page_address(page));
+ if (!ehca_dma_mapping_error(dev, addr))
+ addr += offset;
+
+ return addr;
+}
+
+static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is only a stub; nothing to be done here */
+}
+
+static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i) {
+ u64 addr;
+ addr = ehca_map_vaddr(sg_virt(sg));
+ if (ehca_dma_mapping_error(dev, addr))
+ return 0;
+
+ sg->dma_address = addr;
+ sg->dma_length = sg->length;
+ }
+ return nents;
+}
+
+static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ /* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+ return sg->dma_address;
+}
+
+static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
+{
+ return sg->length;
+}
+
+static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+ u64 dma_addr;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p) {
+ addr = page_address(p);
+ dma_addr = ehca_map_vaddr(addr);
+ if (ehca_dma_mapping_error(dev, dma_addr)) {
+ free_pages((unsigned long)addr, get_order(size));
+ return NULL;
+ }
+ if (dma_handle)
+ *dma_handle = dma_addr;
+ return addr;
+ }
+ return NULL;
+}
+
+static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ if (cpu_addr && size)
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+
+struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
+ .mapping_error = ehca_dma_mapping_error,
+ .map_single = ehca_dma_map_single,
+ .unmap_single = ehca_dma_unmap_single,
+ .map_page = ehca_dma_map_page,
+ .unmap_page = ehca_dma_unmap_page,
+ .map_sg = ehca_dma_map_sg,
+ .unmap_sg = ehca_dma_unmap_sg,
+ .dma_address = ehca_dma_address,
+ .dma_len = ehca_dma_len,
+ .sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
+ .sync_single_for_device = ehca_dma_sync_single_for_device,
+ .alloc_coherent = ehca_dma_alloc_coherent,
+ .free_coherent = ehca_dma_free_coherent,
+};
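
These ops only take effect because ehca_main.c, earlier in this patch, sets shca->ib_device.dma_ops = &ehca_dma_mapping_ops. A simplified sketch, paraphrasing (not quoting) the ib_dma_* wrappers in include/rdma/ib_verbs.h, of how a ULP call reaches them:

static inline u64 example_ib_dma_map_single(struct ib_device *dev,
					    void *cpu_addr, size_t size,
					    enum dma_data_direction direction)
{
	/* use the device-specific ops when present (the ehca case) ... */
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	/* ... otherwise fall back to the regular DMA API */
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
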
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
index bc8f4e31c123..50d8b51306dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -42,6 +42,11 @@
#ifndef _EHCA_MRMW_H_
#define _EHCA_MRMW_H_
+enum ehca_reg_type {
+ EHCA_REG_MR,
+ EHCA_REG_BUSMAP_MR
+};
+
int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_mr *e_mr,
u64 *iova_start,
@@ -50,7 +55,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_pd *e_pd,
struct ehca_mr_pginfo *pginfo,
u32 *lkey,
- u32 *rkey);
+ u32 *rkey,
+ enum ehca_reg_type reg_type);
int ehca_reg_mr_rpages(struct ehca_shca *shca,
struct ehca_mr *e_mr,
@@ -118,4 +124,9 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
void ehca_mr_deletenew(struct ehca_mr *mr);
+int ehca_create_busmap(void);
+
+void ehca_destroy_busmap(void);
+
+extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index d606edf10858..065b20899876 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -352,10 +352,14 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
BUG_ON(!mtts);
+ dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+ list_len * sizeof (u64), DMA_TO_DEVICE);
+
for (i = 0; i < list_len; ++i)
mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+ list_len * sizeof (u64), DMA_TO_DEVICE);
}
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
@@ -803,12 +807,15 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
wmb();
+ dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+ list_len * sizeof(u64), DMA_TO_DEVICE);
+
for (i = 0; i < list_len; ++i)
fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
MTHCA_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+ list_len * sizeof(u64), DMA_TO_DEVICE);
fmr->mem.arbel.mpt->key = cpu_to_be32(key);
fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
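
The two mthca hunks above replace the old one-shot dma_sync_single() call with an explicit ownership hand-off around the CPU writes. A generic sketch of that pattern; the device, table and length names are placeholders:

static void example_update_dma_table(struct device *dev, __be64 *table,
				     dma_addr_t dma_handle,
				     const u64 *values, int n)
{
	int i;

	/* give the buffer back to the CPU before touching it */
	dma_sync_single_for_cpu(dev, dma_handle, n * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < n; ++i)
		table[i] = cpu_to_be64(values[i]);

	/* hand ownership back to the device so it sees the updates */
	dma_sync_single_for_device(dev, dma_handle, n * sizeof(u64), DMA_TO_DEVICE);
}
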
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 11c7d6642014..114b802771ad 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -472,6 +472,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
static void nes_retrans_expired(struct nes_cm_node *cm_node)
{
+ struct iw_cm_id *cm_id = cm_node->cm_id;
switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_CLOSING:
@@ -479,7 +480,9 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
break;
case NES_CM_STATE_LAST_ACK:
case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_MPAREJ_RCVD:
+ if (cm_node->cm_id)
+ cm_id->rem_ref(cm_id);
+ cm_node->state = NES_CM_STATE_CLOSED;
send_reset(cm_node, NULL);
break;
default:
@@ -1406,6 +1409,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
case NES_CM_STATE_CLOSED:
drop_packet(skb);
break;
+ case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
case NES_CM_STATE_TIME_WAIT:
@@ -1413,8 +1417,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
rem_ref_cm_node(cm_node->cm_core, cm_node);
drop_packet(skb);
break;
- case NES_CM_STATE_FIN_WAIT1:
- nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
default:
drop_packet(skb);
break;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d5cfd8f380..21e0fd336cf7 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -654,7 +654,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
default:
props->max_qp_rd_atom = 0;
}
- props->max_qp_init_rd_atom = props->max_qp_wr;
+ props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
props->max_map_per_fmr = 1;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 9d8f796c6745..a6b989a9dc07 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -12,6 +12,42 @@ menuconfig INPUT_KEYBOARD
if INPUT_KEYBOARD
+config KEYBOARD_AAED2000
+ tristate "AAED-2000 keyboard"
+ depends on MACH_AAED2000
+ select INPUT_POLLDEV
+ default y
+ help
+ Say Y here to enable the keyboard on the Agilent AAED-2000
+ development board.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aaed2000_kbd.
+
+config KEYBOARD_AMIGA
+ tristate "Amiga keyboard"
+ depends on AMIGA
+ help
+ Say Y here if you are running Linux on any AMIGA and have a keyboard
+ attached.
+
+ To compile this driver as a module, choose M here: the
+ module will be called amikbd.
+
+config ATARI_KBD_CORE
+ bool
+
+config KEYBOARD_ATARI
+ tristate "Atari keyboard"
+ depends on ATARI
+ select ATARI_KBD_CORE
+ help
+ Say Y here if you are running Linux on any Atari and have a keyboard
+ attached.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atakbd.
+
config KEYBOARD_ATKBD
tristate "AT keyboard" if EMBEDDED || !X86
default y
@@ -68,69 +104,14 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
right-hand column will be interpreted as the key shown in the
left-hand column.
-config KEYBOARD_SUNKBD
- tristate "Sun Type 4 and Type 5 keyboard"
- select SERIO
- help
- Say Y here if you want to use a Sun Type 4 or Type 5 keyboard,
- connected either to the Sun keyboard connector or to an serial
- (RS-232) port via a simple adapter.
-
- To compile this driver as a module, choose M here: the
- module will be called sunkbd.
-
-config KEYBOARD_LKKBD
- tristate "DECstation/VAXstation LK201/LK401 keyboard"
- select SERIO
- help
- Say Y here if you want to use a LK201 or LK401 style serial
- keyboard. This keyboard is also useable on PCs if you attach
- it with the inputattach program. The connector pinout is
- described within lkkbd.c.
-
- To compile this driver as a module, choose M here: the
- module will be called lkkbd.
-
-config KEYBOARD_LOCOMO
- tristate "LoCoMo Keyboard Support"
- depends on SHARP_LOCOMO && INPUT_KEYBOARD
- help
- Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA
-
- To compile this driver as a module, choose M here: the
- module will be called locomokbd.
-
-config KEYBOARD_XTKBD
- tristate "XT keyboard"
- select SERIO
- help
- Say Y here if you want to use the old IBM PC/XT keyboard (or
- compatible) on your system. This is only possible with a
- parallel port keyboard adapter, you cannot connect it to the
- keyboard port on a PC that runs Linux.
-
- To compile this driver as a module, choose M here: the
- module will be called xtkbd.
-
-config KEYBOARD_NEWTON
- tristate "Newton keyboard"
- select SERIO
- help
- Say Y here if you have a Newton keyboard on a serial port.
-
- To compile this driver as a module, choose M here: the
- module will be called newtonkbd.
-
-config KEYBOARD_STOWAWAY
- tristate "Stowaway keyboard"
- select SERIO
+config KEYBOARD_BFIN
+ tristate "Blackfin BF54x keypad support"
+ depends on (BF54x && !BF544)
help
- Say Y here if you have a Stowaway keyboard on a serial port.
- Stowaway compatible keyboards like Dicota Input-PDA keyboard
- are also supported by this driver.
+ Say Y here if you want to use the BF54x keypad.
To compile this driver as a module, choose M here: the
- module will be called stowaway.
+ module will be called bf54x-keys.
config KEYBOARD_CORGI
tristate "Corgi keyboard"
@@ -143,61 +124,50 @@ config KEYBOARD_CORGI
To compile this driver as a module, choose M here: the
module will be called corgikbd.
-config KEYBOARD_SPITZ
- tristate "Spitz keyboard"
- depends on PXA_SHARPSL
- default y
+config KEYBOARD_LKKBD
+ tristate "DECstation/VAXstation LK201/LK401 keyboard"
+ select SERIO
help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
- SL-C3000 and Sl-C3100 series of PDAs.
+ Say Y here if you want to use a LK201 or LK401 style serial
+ keyboard. This keyboard is also useable on PCs if you attach
+ it with the inputattach program. The connector pinout is
+ described within lkkbd.c.
To compile this driver as a module, choose M here: the
- module will be called spitzkbd.
+ module will be called lkkbd.
-config KEYBOARD_TOSA
- tristate "Tosa keyboard"
- depends on MACH_TOSA
- default y
+config KEYBOARD_EP93XX
+ tristate "EP93xx Matrix Keypad support"
+ depends on ARCH_EP93XX
help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
+ Say Y here to enable the matrix keypad on the Cirrus EP93XX.
To compile this driver as a module, choose M here: the
- module will be called tosakbd.
+ module will be called ep93xx_keypad.
-config KEYBOARD_TOSA_USE_EXT_KEYCODES
- bool "Tosa keyboard: use extended keycodes"
- depends on KEYBOARD_TOSA
- default n
+config KEYBOARD_GPIO
+ tristate "GPIO Buttons"
+ depends on GENERIC_GPIO
help
- Say Y here to enable the tosa keyboard driver to generate extended
- (>= 127) keycodes. Be aware, that they can't be correctly interpreted
- by either console keyboard driver or by Kdrive keybd driver.
-
- Say Y only if you know, what you are doing!
+ This driver implements support for buttons connected
+ to GPIO pins of various CPUs (and some other chips).
-config KEYBOARD_AMIGA
- tristate "Amiga keyboard"
- depends on AMIGA
- help
- Say Y here if you are running Linux on any AMIGA and have a keyboard
- attached.
+ Say Y here if your device has buttons connected
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
To compile this driver as a module, choose M here: the
- module will be called amikbd.
+ module will be called gpio_keys.
-config ATARI_KBD_CORE
- bool
-
-config KEYBOARD_ATARI
- tristate "Atari keyboard"
- depends on ATARI
- select ATARI_KBD_CORE
+config KEYBOARD_MATRIX
+ tristate "GPIO driven matrix keypad support"
+ depends on GENERIC_GPIO
help
- Say Y here if you are running Linux on any Atari and have a keyboard
- attached.
+ Enable support for GPIO driven matrix keypad.
To compile this driver as a module, choose M here: the
- module will be called atakbd.
+ module will be called matrix_keypad.
config KEYBOARD_HIL_OLD
tristate "HP HIL keyboard support (simple driver)"
@@ -261,20 +231,39 @@ config KEYBOARD_LM8323
To compile this driver as a module, choose M here: the
module will be called lm8323.
-config KEYBOARD_OMAP
- tristate "TI OMAP keypad support"
- depends on (ARCH_OMAP1 || ARCH_OMAP2)
+config KEYBOARD_LOCOMO
+ tristate "LoCoMo Keyboard Support"
+ depends on SHARP_LOCOMO
help
- Say Y here if you want to use the OMAP keypad.
+	  Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA.
To compile this driver as a module, choose M here: the
- module will be called omap-keypad.
+ module will be called locomokbd.
+
+config KEYBOARD_MAPLE
+ tristate "Maple bus keyboard"
+ depends on SH_DREAMCAST && MAPLE
+ help
+ Say Y here if you have a Dreamcast console running Linux and have
+ a keyboard attached to its Maple bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called maple_keyb.
+
+config KEYBOARD_NEWTON
+ tristate "Newton keyboard"
+ select SERIO
+ help
+ Say Y here if you have a Newton keyboard on a serial port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called newtonkbd.
config KEYBOARD_PXA27x
tristate "PXA27x/PXA3xx keypad support"
depends on PXA27x || PXA3xx
help
- Enable support for PXA27x/PXA3xx keypad controller
+ Enable support for PXA27x/PXA3xx keypad controller.
To compile this driver as a module, choose M here: the
module will be called pxa27x_keypad.
@@ -288,51 +277,38 @@ config KEYBOARD_PXA930_ROTARY
To compile this driver as a module, choose M here: the
module will be called pxa930_rotary.
-config KEYBOARD_AAED2000
- tristate "AAED-2000 keyboard"
- depends on MACH_AAED2000
- select INPUT_POLLDEV
+config KEYBOARD_SPITZ
+ tristate "Spitz keyboard"
+ depends on PXA_SHARPSL
default y
help
- Say Y here to enable the keyboard on the Agilent AAED-2000
- development board.
-
- To compile this driver as a module, choose M here: the
- module will be called aaed2000_kbd.
-
-config KEYBOARD_GPIO
- tristate "GPIO Buttons"
- depends on GENERIC_GPIO
- help
- This driver implements support for buttons connected
- to GPIO pins of various CPUs (and some other chips).
-
- Say Y here if your device has buttons connected
- directly to such GPIO pins. Your board-specific
- setup logic must also provide a platform device,
- with configuration data saying which GPIOs are used.
+ Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
+	  SL-C3000 and SL-C3100 series of PDAs.
To compile this driver as a module, choose M here: the
- module will be called gpio-keys.
+ module will be called spitzkbd.
-config KEYBOARD_MAPLE
- tristate "Maple bus keyboard"
- depends on SH_DREAMCAST && MAPLE
+config KEYBOARD_STOWAWAY
+ tristate "Stowaway keyboard"
+ select SERIO
help
- Say Y here if you have a Dreamcast console running Linux and have
- a keyboard attached to its Maple bus.
+ Say Y here if you have a Stowaway keyboard on a serial port.
+ Stowaway compatible keyboards like Dicota Input-PDA keyboard
+ are also supported by this driver.
To compile this driver as a module, choose M here: the
- module will be called maple_keyb.
+ module will be called stowaway.
-config KEYBOARD_BFIN
- tristate "Blackfin BF54x keypad support"
- depends on (BF54x && !BF544)
+config KEYBOARD_SUNKBD
+ tristate "Sun Type 4 and Type 5 keyboard"
+ select SERIO
help
- Say Y here if you want to use the BF54x keypad.
+ Say Y here if you want to use a Sun Type 4 or Type 5 keyboard,
+	  connected either to the Sun keyboard connector or to a serial
+ (RS-232) port via a simple adapter.
To compile this driver as a module, choose M here: the
- module will be called bf54x-keys.
+ module will be called sunkbd.
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
@@ -344,13 +320,45 @@ config KEYBOARD_SH_KEYSC
To compile this driver as a module, choose M here: the
module will be called sh_keysc.
-config KEYBOARD_EP93XX
- tristate "EP93xx Matrix Keypad support"
- depends on ARCH_EP93XX
+config KEYBOARD_OMAP
+ tristate "TI OMAP keypad support"
+ depends on (ARCH_OMAP1 || ARCH_OMAP2)
help
- Say Y here to enable the matrix keypad on the Cirrus EP93XX.
+ Say Y here if you want to use the OMAP keypad.
To compile this driver as a module, choose M here: the
- module will be called ep93xx_keypad.
+ module will be called omap-keypad.
+
+config KEYBOARD_TOSA
+ tristate "Tosa keyboard"
+ depends on MACH_TOSA
+ default y
+ help
+	  Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa).
+
+ To compile this driver as a module, choose M here: the
+ module will be called tosakbd.
+
+config KEYBOARD_TOSA_USE_EXT_KEYCODES
+ bool "Tosa keyboard: use extended keycodes"
+ depends on KEYBOARD_TOSA
+ help
+	  Say Y here to enable the Tosa keyboard driver to generate extended
+	  (>= 127) keycodes. Be aware that they can't be correctly interpreted
+	  by either the console keyboard driver or the Kdrive keyboard driver.
+
+	  Say Y only if you know what you are doing!
+
+config KEYBOARD_XTKBD
+ tristate "XT keyboard"
+ select SERIO
+ help
+ Say Y here if you want to use the old IBM PC/XT keyboard (or
+ compatible) on your system. This is only possible with a
+	  parallel port keyboard adapter; you cannot connect it to the
+ keyboard port on a PC that runs Linux.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xtkbd.
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 156b647a259b..b5b5eae9724f 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,29 +4,30 @@
# Each configuration option enables a list of files.
-obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
-obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
-obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
-obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
+obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
-obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
-obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
-obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
+obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
+obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
-obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
-obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
+obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
+obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
+obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
+obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
+obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o
+obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
+obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
+obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
+obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
-obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
-obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
-obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
-obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
-obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
-obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
-obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
+obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
+obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
+obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
+obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
+obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 2157cd7de00c..efed0c9e242e 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -29,7 +29,8 @@
struct gpio_button_data {
struct gpio_keys_button *button;
struct input_dev *input;
- struct delayed_work work;
+ struct timer_list timer;
+ struct work_struct work;
};
struct gpio_keys_drvdata {
@@ -40,7 +41,7 @@ struct gpio_keys_drvdata {
static void gpio_keys_report_event(struct work_struct *work)
{
struct gpio_button_data *bdata =
- container_of(work, struct gpio_button_data, work.work);
+ container_of(work, struct gpio_button_data, work);
struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
@@ -50,17 +51,25 @@ static void gpio_keys_report_event(struct work_struct *work)
input_sync(input);
}
+static void gpio_keys_timer(unsigned long _data)
+{
+ struct gpio_button_data *data = (struct gpio_button_data *)_data;
+
+ schedule_work(&data->work);
+}
+
static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
struct gpio_keys_button *button = bdata->button;
- unsigned long delay;
BUG_ON(irq != gpio_to_irq(button->gpio));
- delay = button->debounce_interval ?
- msecs_to_jiffies(button->debounce_interval) : 0;
- schedule_delayed_work(&bdata->work, delay);
+ if (button->debounce_interval)
+ mod_timer(&bdata->timer,
+ jiffies + msecs_to_jiffies(button->debounce_interval));
+ else
+ schedule_work(&bdata->work);
return IRQ_HANDLED;
}
@@ -107,7 +116,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
bdata->input = input;
bdata->button = button;
- INIT_DELAYED_WORK(&bdata->work, gpio_keys_report_event);
+ setup_timer(&bdata->timer,
+ gpio_keys_timer, (unsigned long)bdata);
+ INIT_WORK(&bdata->work, gpio_keys_report_event);
error = gpio_request(button->gpio, button->desc ?: "gpio_keys");
if (error < 0) {
@@ -166,7 +177,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
fail2:
while (--i >= 0) {
free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]);
- cancel_delayed_work_sync(&ddata->data[i].work);
+ if (pdata->buttons[i].debounce_interval)
+ del_timer_sync(&ddata->data[i].timer);
+ cancel_work_sync(&ddata->data[i].work);
gpio_free(pdata->buttons[i].gpio);
}
@@ -190,7 +203,9 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
for (i = 0; i < pdata->nbuttons; i++) {
int irq = gpio_to_irq(pdata->buttons[i].gpio);
free_irq(irq, &ddata->data[i]);
- cancel_delayed_work_sync(&ddata->data[i].work);
+ if (pdata->buttons[i].debounce_interval)
+ del_timer_sync(&ddata->data[i].timer);
+ cancel_work_sync(&ddata->data[i].work);
gpio_free(pdata->buttons[i].gpio);
}
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
new file mode 100644
index 000000000000..e9b2e7cb05be
--- /dev/null
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -0,0 +1,453 @@
+/*
+ * GPIO driven matrix keyboard driver
+ *
+ * Copyright (c) 2008 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on corgikbd.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/input/matrix_keypad.h>
+
+struct matrix_keypad {
+ const struct matrix_keypad_platform_data *pdata;
+ struct input_dev *input_dev;
+ unsigned short *keycodes;
+
+ uint32_t last_key_state[MATRIX_MAX_COLS];
+ struct delayed_work work;
+ bool scan_pending;
+ bool stopped;
+ spinlock_t lock;
+};
+
+/*
+ * NOTE: normally the GPIO has to be put into HiZ when de-activated to cause
+ * minmal side effect when scanning other columns, here it is configured to
+ * be input, and it should work on most platforms.
+ */
+static void __activate_col(const struct matrix_keypad_platform_data *pdata,
+ int col, bool on)
+{
+ bool level_on = !pdata->active_low;
+
+ if (on) {
+ gpio_direction_output(pdata->col_gpios[col], level_on);
+ } else {
+ gpio_set_value_cansleep(pdata->col_gpios[col], !level_on);
+ gpio_direction_input(pdata->col_gpios[col]);
+ }
+}
+
+static void activate_col(const struct matrix_keypad_platform_data *pdata,
+ int col, bool on)
+{
+ __activate_col(pdata, col, on);
+
+ if (on && pdata->col_scan_delay_us)
+ udelay(pdata->col_scan_delay_us);
+}
+
+static void activate_all_cols(const struct matrix_keypad_platform_data *pdata,
+ bool on)
+{
+ int col;
+
+ for (col = 0; col < pdata->num_col_gpios; col++)
+ __activate_col(pdata, col, on);
+}
+
+static bool row_asserted(const struct matrix_keypad_platform_data *pdata,
+ int row)
+{
+ return gpio_get_value_cansleep(pdata->row_gpios[row]) ?
+ !pdata->active_low : pdata->active_low;
+}
+
+static void enable_row_irqs(struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ enable_irq(gpio_to_irq(pdata->row_gpios[i]));
+}
+
+static void disable_row_irqs(struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ disable_irq_nosync(gpio_to_irq(pdata->row_gpios[i]));
+}
+
+/*
+ * This gets the keys from the keyboard and reports them to the input subsystem
+ */
+static void matrix_keypad_scan(struct work_struct *work)
+{
+ struct matrix_keypad *keypad =
+ container_of(work, struct matrix_keypad, work.work);
+ struct input_dev *input_dev = keypad->input_dev;
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ uint32_t new_state[MATRIX_MAX_COLS];
+ int row, col, code;
+
+ /* de-activate all columns for scanning */
+ activate_all_cols(pdata, false);
+
+ memset(new_state, 0, sizeof(new_state));
+
+ /* assert each column and read the row status out */
+ for (col = 0; col < pdata->num_col_gpios; col++) {
+
+ activate_col(pdata, col, true);
+
+ for (row = 0; row < pdata->num_row_gpios; row++)
+ new_state[col] |=
+ row_asserted(pdata, row) ? (1 << row) : 0;
+
+ activate_col(pdata, col, false);
+ }
+
+ for (col = 0; col < pdata->num_col_gpios; col++) {
+ uint32_t bits_changed;
+
+ bits_changed = keypad->last_key_state[col] ^ new_state[col];
+ if (bits_changed == 0)
+ continue;
+
+ for (row = 0; row < pdata->num_row_gpios; row++) {
+ if ((bits_changed & (1 << row)) == 0)
+ continue;
+
+ code = (row << 4) + col;
+ input_event(input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(input_dev,
+ keypad->keycodes[code],
+ new_state[col] & (1 << row));
+ }
+ }
+ input_sync(input_dev);
+
+ memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+ activate_all_cols(pdata, true);
+
+ /* Enable IRQs again */
+ spin_lock_irq(&keypad->lock);
+ keypad->scan_pending = false;
+ enable_row_irqs(keypad);
+ spin_unlock_irq(&keypad->lock);
+}
+
+static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
+{
+ struct matrix_keypad *keypad = id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&keypad->lock, flags);
+
+ /*
+	 * See if another IRQ beat us to it and already scheduled the
+	 * scan. In that case we should not try to
+ * disable IRQs again.
+ */
+ if (unlikely(keypad->scan_pending || keypad->stopped))
+ goto out;
+
+ disable_row_irqs(keypad);
+ keypad->scan_pending = true;
+ schedule_delayed_work(&keypad->work,
+ msecs_to_jiffies(keypad->pdata->debounce_ms));
+
+out:
+ spin_unlock_irqrestore(&keypad->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int matrix_keypad_start(struct input_dev *dev)
+{
+ struct matrix_keypad *keypad = input_get_drvdata(dev);
+
+ keypad->stopped = false;
+ mb();
+
+ /*
+ * Schedule an immediate key scan to capture current key state;
+	 * columns will be activated and IRQs enabled after the scan.
+ */
+ schedule_delayed_work(&keypad->work, 0);
+
+ return 0;
+}
+
+static void matrix_keypad_stop(struct input_dev *dev)
+{
+ struct matrix_keypad *keypad = input_get_drvdata(dev);
+
+ keypad->stopped = true;
+ mb();
+ flush_work(&keypad->work.work);
+ /*
+ * matrix_keypad_scan() will leave IRQs enabled;
+ * we should disable them now.
+ */
+ disable_row_irqs(keypad);
+}
+
+#ifdef CONFIG_PM
+static int matrix_keypad_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct matrix_keypad *keypad = platform_get_drvdata(pdev);
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ matrix_keypad_stop(keypad->input_dev);
+
+ if (device_may_wakeup(&pdev->dev))
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ enable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+
+ return 0;
+}
+
+static int matrix_keypad_resume(struct platform_device *pdev)
+{
+ struct matrix_keypad *keypad = platform_get_drvdata(pdev);
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ if (device_may_wakeup(&pdev->dev))
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ disable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+
+ matrix_keypad_start(keypad->input_dev);
+
+ return 0;
+}
+#else
+#define matrix_keypad_suspend NULL
+#define matrix_keypad_resume NULL
+#endif
+
+static int __devinit init_matrix_gpio(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i, err = -EINVAL;
+
+	/* initialize strobe lines as outputs, activated */
+ for (i = 0; i < pdata->num_col_gpios; i++) {
+ err = gpio_request(pdata->col_gpios[i], "matrix_kbd_col");
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to request GPIO%d for COL%d\n",
+ pdata->col_gpios[i], i);
+ goto err_free_cols;
+ }
+
+ gpio_direction_output(pdata->col_gpios[i], !pdata->active_low);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ err = gpio_request(pdata->row_gpios[i], "matrix_kbd_row");
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to request GPIO%d for ROW%d\n",
+ pdata->row_gpios[i], i);
+ goto err_free_rows;
+ }
+
+ gpio_direction_input(pdata->row_gpios[i]);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+ matrix_keypad_interrupt,
+ IRQF_DISABLED |
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "matrix-keypad", keypad);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to acquire interrupt for GPIO line %i\n",
+ pdata->row_gpios[i]);
+ goto err_free_irqs;
+ }
+ }
+
+ /* initialized as disabled - enabled by input->open */
+ disable_row_irqs(keypad);
+ return 0;
+
+err_free_irqs:
+ while (--i >= 0)
+ free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
+ i = pdata->num_row_gpios;
+err_free_rows:
+ while (--i >= 0)
+ gpio_free(pdata->row_gpios[i]);
+ i = pdata->num_col_gpios;
+err_free_cols:
+ while (--i >= 0)
+ gpio_free(pdata->col_gpios[i]);
+
+ return err;
+}
+
+static int __devinit matrix_keypad_probe(struct platform_device *pdev)
+{
+ const struct matrix_keypad_platform_data *pdata;
+ const struct matrix_keymap_data *keymap_data;
+ struct matrix_keypad *keypad;
+ struct input_dev *input_dev;
+ unsigned short *keycodes;
+ int i;
+ int err;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -EINVAL;
+ }
+
+ keymap_data = pdata->keymap_data;
+ if (!keymap_data) {
+ dev_err(&pdev->dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+
+ if (!keymap_data->max_keymap_size) {
+ dev_err(&pdev->dev, "invalid keymap data supplied\n");
+ return -EINVAL;
+ }
+
+ keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
+ keycodes = kzalloc(keymap_data->max_keymap_size *
+ sizeof(keypad->keycodes),
+ GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!keypad || !keycodes || !input_dev) {
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ keypad->input_dev = input_dev;
+ keypad->pdata = pdata;
+ keypad->keycodes = keycodes;
+ keypad->stopped = true;
+ INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
+ spin_lock_init(&keypad->lock);
+
+ input_dev->name = pdev->name;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_dev->open = matrix_keypad_start;
+ input_dev->close = matrix_keypad_stop;
+
+ input_dev->keycode = keycodes;
+ input_dev->keycodesize = sizeof(*keycodes);
+ input_dev->keycodemax = keymap_data->max_keymap_size;
+
+ for (i = 0; i < keymap_data->keymap_size; i++) {
+ unsigned int key = keymap_data->keymap[i];
+ unsigned int row = KEY_ROW(key);
+ unsigned int col = KEY_COL(key);
+ unsigned short code = KEY_VAL(key);
+
+ keycodes[(row << 4) + col] = code;
+ __set_bit(code, input_dev->keybit);
+ }
+ __clear_bit(KEY_RESERVED, input_dev->keybit);
+
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input_dev, keypad);
+
+ err = init_matrix_gpio(pdev, keypad);
+ if (err)
+ goto err_free_mem;
+
+ err = input_register_device(keypad->input_dev);
+ if (err)
+ goto err_free_mem;
+
+ device_init_wakeup(&pdev->dev, pdata->wakeup);
+ platform_set_drvdata(pdev, keypad);
+
+ return 0;
+
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(keycodes);
+ kfree(keypad);
+ return err;
+}
+
+static int __devexit matrix_keypad_remove(struct platform_device *pdev)
+{
+ struct matrix_keypad *keypad = platform_get_drvdata(pdev);
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
+ gpio_free(pdata->row_gpios[i]);
+ }
+
+ for (i = 0; i < pdata->num_col_gpios; i++)
+ gpio_free(pdata->col_gpios[i]);
+
+ input_unregister_device(keypad->input_dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(keypad->keycodes);
+ kfree(keypad);
+
+ return 0;
+}
+
+static struct platform_driver matrix_keypad_driver = {
+ .probe = matrix_keypad_probe,
+ .remove = __devexit_p(matrix_keypad_remove),
+ .suspend = matrix_keypad_suspend,
+ .resume = matrix_keypad_resume,
+ .driver = {
+ .name = "matrix-keypad",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init matrix_keypad_init(void)
+{
+ return platform_driver_register(&matrix_keypad_driver);
+}
+
+static void __exit matrix_keypad_exit(void)
+{
+ platform_driver_unregister(&matrix_keypad_driver);
+}
+
+module_init(matrix_keypad_init);
+module_exit(matrix_keypad_exit);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:matrix-keypad");
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 2adf9cb265da..d114d3a9e1e9 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -1,7 +1,7 @@
/*
* Cobalt button interface driver.
*
- * Copyright (C) 2007-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -148,7 +148,7 @@ static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
return 0;
}
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("Cobalt button interface driver");
MODULE_LICENSE("GPL");
/* work with hotplug and coldplug */
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 5e5eb88d8d1e..7b6ce178f1b6 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -46,7 +46,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
input_sync(input);
}
-static int __init gpio_mouse_probe(struct platform_device *pdev)
+static int __devinit gpio_mouse_probe(struct platform_device *pdev)
{
struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data;
struct input_polled_dev *input_poll;
@@ -170,10 +170,8 @@ static int __devexit gpio_mouse_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:gpio_mouse");
-
static struct platform_driver gpio_mouse_device_driver = {
+ .probe = gpio_mouse_probe,
.remove = __devexit_p(gpio_mouse_remove),
.driver = {
.name = "gpio_mouse",
@@ -183,8 +181,7 @@ static struct platform_driver gpio_mouse_device_driver = {
static int __init gpio_mouse_init(void)
{
- return platform_driver_probe(&gpio_mouse_device_driver,
- gpio_mouse_probe);
+ return platform_driver_register(&gpio_mouse_device_driver);
}
module_init(gpio_mouse_init);
@@ -197,3 +194,5 @@ module_exit(gpio_mouse_exit);
MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
MODULE_DESCRIPTION("GPIO mouse driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
+
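The gpio_mouse change switches from platform_driver_probe(), which binds only the
devices already registered at init time and then lets the probe routine be discarded,
to platform_driver_register() with a .probe method kept in the driver, so devices that
appear later can still bind; the probe annotation accordingly moves from __init to
__devinit. A bare-bones sketch of the resulting registration style follows; all my_*
names are placeholders and are not part of the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit my_probe(struct platform_device *pdev)
{
	/* allocate and register the device's resources here */
	return 0;
}

static int __devexit my_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver my_driver = {
	.probe	= my_probe,	/* kept in the driver, unlike with platform_driver_probe() */
	.remove	= __devexit_p(my_remove),
	.driver	= {
		.name	= "my_device",
		.owner	= THIS_MODULE,
	},
};

static int __init my_init(void)
{
	return platform_driver_register(&my_driver);
}
module_init(my_init);

static void __exit my_exit(void)
{
	platform_driver_unregister(&my_driver);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:my_device");	/* lets hotplug load the module by name */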
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index fb8a3cd3ffd0..924e8ed7f2cf 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -392,6 +392,34 @@ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
},
},
+ {
+ .ident = "Acer Aspire One 150",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ },
+ },
+ {
+ .ident = "Advent 4211",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
+ },
+ },
+ {
+ .ident = "Medion Akoya Mini E1210",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
+ },
+ },
+ {
+ .ident = "Mivvy M310",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index f919bf57293c..582245c497eb 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -934,10 +934,11 @@ static bool i8042_suspended;
static int i8042_suspend(struct platform_device *dev, pm_message_t state)
{
- if (!i8042_suspended && state.event == PM_EVENT_SUSPEND) {
+ if (!i8042_suspended && state.event == PM_EVENT_SUSPEND)
i8042_controller_reset();
- i8042_suspended = true;
- }
+
+ i8042_suspended = state.event == PM_EVENT_SUSPEND ||
+ state.event == PM_EVENT_FREEZE;
return 0;
}
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index fb17573f8f2d..d66f4944f2a0 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -935,10 +935,11 @@ static int serio_suspend(struct device *dev, pm_message_t state)
{
struct serio *serio = to_serio_port(dev);
- if (!serio->suspended && state.event == PM_EVENT_SUSPEND) {
+ if (!serio->suspended && state.event == PM_EVENT_SUSPEND)
serio_cleanup(serio);
- serio->suspended = true;
- }
+
+ serio->suspended = state.event == PM_EVENT_SUSPEND ||
+ state.event == PM_EVENT_FREEZE;
return 0;
}
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 38bf86384aeb..c896d6a21b7e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -384,6 +384,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
wacom_report_key(wcombo, BTN_STYLUS2, 0);
wacom_report_key(wcombo, BTN_TOUCH, 0);
wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ if (wacom->features->type >= INTUOS3S)
+ wacom_report_abs(wcombo, ABS_Z, 0);
}
wacom_report_key(wcombo, wacom->tool[idx], 0);
wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
@@ -836,6 +838,7 @@ static struct wacom_features wacom_features[] = {
{ "Wacom DTU710", 8, 34080, 27660, 511, 0, PL },
{ "Wacom DTF521", 8, 6282, 4762, 511, 0, PL },
{ "Wacom DTF720", 8, 6858, 5506, 511, 0, PL },
+ { "Wacom DTF720a", 8, 6858, 5506, 511, 0, PL },
{ "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU },
{ "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS },
{ "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
@@ -897,8 +900,9 @@ static struct usb_device_id wacom_ids[] = {
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x37) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x38) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x39) },
- { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC4) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC2) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x03) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x41) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x42) },
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index ec5169604a6a..2d91049571a4 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -294,32 +294,33 @@ struct reply_t gigaset_tab_cid[] =
{RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
{RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
{RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
- {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */
+ {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"},
{RSP_OK, 607,607, -1, 608,-1},
- //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE
{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
{RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},
- {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}},
- {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}},
- {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}},
{RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
{EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
- /* dialing */
- {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}},
- {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}},
- {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */
-
- /* connection established */
- {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
- {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
-
- {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout
+ /* optional dialing responses */
+ {EV_BC_OPEN, 650,650, -1, 651,-1},
+ {RSP_ZVLS, 608,651, 17, -1,-1, {ACT_DEBUG}},
+ {RSP_ZCTP, 609,651, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ZCPN, 609,651, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}},
+
+ /* connect */
+ {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
+ {RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
+ ACT_NOTIFY_BC_UP}},
+ {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
+ {RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
+ ACT_NOTIFY_BC_UP}},
+ {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}},
/* remote hangup */
- {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
- {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
+ {RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
+ {RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
{RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
/* hangup */
@@ -358,7 +359,8 @@ struct reply_t gigaset_tab_cid[] =
{RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
{RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},
- {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}},
+ {EV_BC_OPEN, 750,750, -1, 751,-1},
+ {EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}},
/* B channel closed (general case) */
{EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME
@@ -876,12 +878,6 @@ static void bchannel_down(struct bc_state *bcs)
static void bchannel_up(struct bc_state *bcs)
{
- if (!(bcs->chstate & CHS_D_UP)) {
- dev_notice(bcs->cs->dev, "%s: D channel not up\n", __func__);
- bcs->chstate |= CHS_D_UP;
- gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
- }
-
if (bcs->chstate & CHS_B_UP) {
dev_notice(bcs->cs->dev, "%s: B channel already up\n",
__func__);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index db3a1e4cd489..bed38fcc432b 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -174,12 +174,6 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
pr_err("invalid size %d\n", size);
return -EINVAL;
}
- src = iwb->read;
- if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
- (read < src && limit >= src))) {
- pr_err("isoc write buffer frame reservation violated\n");
- return -EFAULT;
- }
#endif
if (read < write) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9b60b6b684d9..7c8e7122aaa9 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -75,6 +75,7 @@ config LEDS_ALIX2
depends on LEDS_CLASS && X86 && EXPERIMENTAL
help
This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+ You have to set leds-alix2.force=1 for boards with Award BIOS.
config LEDS_H1940
tristate "LED Support for iPAQ H1940 device"
@@ -145,15 +146,16 @@ config LEDS_GPIO_OF
of_platform devices. For instance, LEDs which are listed in a "dts"
file.
-config LEDS_LP5521
- tristate "LED Support for the LP5521 LEDs"
+config LEDS_LP3944
+ tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
depends on LEDS_CLASS && I2C
help
- If you say 'Y' here you get support for the National Semiconductor
- LP5521 LED driver used in n8x0 boards.
+ This option enables support for LEDs connected to the National
+	  Semiconductor LP3944 Lighting Management Unit (LMU), also known as
+	  the Fun Light Chip.
- This driver can be built as a module by choosing 'M'. The module
- will be called leds-lp5521.
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp3944.
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2d41c4dcf92f..e8cdcf77a4c3 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index ddbd7730dfc8..731d4eef3425 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -14,7 +14,7 @@
static int force = 0;
module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
struct alix_led {
struct led_classdev cdev;
@@ -155,6 +155,11 @@ static int __init alix_led_init(void)
goto out;
}
+	/* enable output on GPIOs for LEDs 1, 2 and 3 */
+ outl(1 << 6, 0x6104);
+ outl(1 << 9, 0x6184);
+ outl(1 << 11, 0x6184);
+
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (!IS_ERR(pdev)) {
ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 4149ecb3a9b2..779d7f262c04 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -97,6 +97,10 @@ struct bd2802_led {
enum led_ids led_id;
enum led_colors color;
enum led_bits state;
+
+ /* General attributes of RGB LEDs */
+ int wave_pattern;
+ int rgb_current;
};
@@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id,
bd2802_reset_cancel(led);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
@@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id,
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
- bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF);
+ bd2802_write_byte(led->client, reg, led->wave_pattern);
bd2802_enable(led, id);
bd2802_update_state(led, id, color, BD2802_BLINK);
@@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led)
ret = device_create_file(&led->client->dev,
bd2802_addr_attributes[i]);
if (ret) {
- dev_err(&led->client->dev, "failed to sysfs file %s\n",
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
bd2802_addr_attributes[i]->attr.name);
goto failed_remove_files;
}
@@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = {
.store = bd2802_store_adv_conf,
};
+#define BD2802_CONTROL_ATTR(attr_name, name_str) \
+static ssize_t bd2802_show_##attr_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ ssize_t ret; \
+ down_read(&led->rwsem); \
+ ret = sprintf(buf, "0x%02x\n", led->attr_name); \
+ up_read(&led->rwsem); \
+ return ret; \
+} \
+static ssize_t bd2802_store_##attr_name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ unsigned long val; \
+ int ret; \
+ if (!count) \
+ return -EINVAL; \
+ ret = strict_strtoul(buf, 16, &val); \
+ if (ret) \
+ return ret; \
+ down_write(&led->rwsem); \
+ led->attr_name = val; \
+ up_write(&led->rwsem); \
+ return count; \
+} \
+static struct device_attribute bd2802_##attr_name##_attr = { \
+ .attr = { \
+ .name = name_str, \
+ .mode = 0644, \
+ .owner = THIS_MODULE \
+ }, \
+ .show = bd2802_show_##attr_name, \
+ .store = bd2802_store_##attr_name, \
+};
+
+BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
+BD2802_CONTROL_ATTR(rgb_current, "rgb_current");
+
+static struct device_attribute *bd2802_attributes[] = {
+ &bd2802_adv_conf_attr,
+ &bd2802_wave_pattern_attr,
+ &bd2802_rgb_current_attr,
+};
+
static void bd2802_led_work(struct work_struct *work)
{
struct bd2802_led *led = container_of(work, struct bd2802_led, work);
@@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1r.brightness = LED_OFF;
led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
led->cdev_led1r.blink_set = bd2802_set_led1r_blink;
- led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
if (ret < 0) {
@@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1g.brightness = LED_OFF;
led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
led->cdev_led1g.blink_set = bd2802_set_led1g_blink;
- led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
if (ret < 0) {
@@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1b.brightness = LED_OFF;
led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
led->cdev_led1b.blink_set = bd2802_set_led1b_blink;
- led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
if (ret < 0) {
@@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2r.brightness = LED_OFF;
led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
led->cdev_led2r.blink_set = bd2802_set_led2r_blink;
- led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
if (ret < 0) {
@@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2g.brightness = LED_OFF;
led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
led->cdev_led2g.blink_set = bd2802_set_led2g_blink;
- led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
if (ret < 0) {
@@ -640,7 +685,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
{
struct bd2802_led *led;
struct bd2802_led_platform_data *pdata;
- int ret;
+ int ret, i;
led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
if (!led) {
@@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client,
/* To save the power, reset BD2802 after detecting */
gpio_set_value(led->pdata->reset_gpio, 0);
+ /* Default attributes */
+ led->wave_pattern = BD2802_PATTERN_HALF;
+ led->rgb_current = BD2802_CURRENT_032;
+
init_rwsem(&led->rwsem);
- ret = device_create_file(&client->dev, &bd2802_adv_conf_attr);
- if (ret) {
- dev_err(&client->dev, "failed to create sysfs file %s\n",
- bd2802_adv_conf_attr.attr.name);
- goto failed_free;
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
+ ret = device_create_file(&led->client->dev,
+ bd2802_attributes[i]);
+ if (ret) {
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
+ bd2802_attributes[i]->attr.name);
+ goto failed_unregister_dev_file;
+ }
}
ret = bd2802_register_led_classdev(led);
@@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client,
return 0;
failed_unregister_dev_file:
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i--; i >= 0; i--)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
failed_free:
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -697,12 +750,14 @@ failed_free:
static int __exit bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
+ int i;
- bd2802_unregister_led_classdev(led);
gpio_set_value(led->pdata->reset_gpio, 0);
+ bd2802_unregister_led_classdev(led);
if (led->adf_on)
bd2802_disable_adv_conf(led);
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client)
struct bd2802_led *led = i2c_get_clientdata(client);
if (!bd2802_is_all_off(led) || led->adf_on) {
- gpio_set_value(led->pdata->reset_gpio, 1);
- udelay(100);
+ bd2802_reset_cancel(led);
bd2802_restore_state(led);
}
@@ -762,4 +816,4 @@ module_exit(bd2802_exit);
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index ff0e8c3fbf9b..5f1ce810815f 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -1,7 +1,7 @@
/*
* LEDs driver for the Cobalt Raq series.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d2109054de85..6b06638eb5b4 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
int (*blink_set)(unsigned, unsigned long *, unsigned long *))
{
- int ret;
+ int ret, state;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
@@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
led_dat->cdev.blink_set = gpio_blink_set;
}
led_dat->cdev.brightness_set = gpio_led_set;
- led_dat->cdev.brightness = LED_OFF;
+ if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
+ state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
+ else
+ state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low);
+ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
@@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led)
}
#ifdef CONFIG_LEDS_GPIO_PLATFORM
-static int gpio_led_probe(struct platform_device *pdev)
+static int __devinit gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct gpio_led_data *leds_data;
@@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
memset(&led, 0, sizeof(led));
for_each_child_of_node(np, child) {
enum of_gpio_flags flags;
+ const char *state;
led.gpio = of_get_gpio_flags(child, 0, &flags);
led.active_low = flags & OF_GPIO_ACTIVE_LOW;
led.name = of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "keep"))
+ led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+ else if(!strcmp(state, "on"))
+ led.default_state = LEDS_GPIO_DEFSTATE_ON;
+ else
+ led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+ }
ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
&ofdev->dev, NULL);
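In the leds-gpio change above, the initial brightness now follows default_state (with
"keep" reading the current GPIO level back), and the line is programmed with
gpio_direction_output(gpio, active_low ^ state), so an active-low LED that should
start on is driven low. The XOR can be spot-checked with a trivial userspace loop:

/* Truth table for the active_low ^ state value written to the GPIO. */
#include <stdio.h>

int main(void)
{
	int active_low, state;

	for (active_low = 0; active_low <= 1; active_low++)
		for (state = 0; state <= 1; state++)
			printf("active_low=%d, initial state=%s -> gpio level %d\n",
			       active_low, state ? "on" : "off",
			       active_low ^ state);
	return 0;
}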
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
new file mode 100644
index 000000000000..5946208ba26e
--- /dev/null
+++ b/drivers/leds/leds-lp3944.c
@@ -0,0 +1,466 @@
+/*
+ * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * I2C driver for National Semiconductor LP3944 Funlight Chip
+ * http://www.national.com/pf/LP/LP3944.html
+ *
+ * This helper chip can drive up to 8 leds, with two programmable DIM modes;
+ * it could even be used as a gpio expander, but this driver assumes it is
+ * used as a led controller.
+ *
+ * The DIM modes are used to set _blink_ patterns for leds; a pattern is
+ * specified by supplying two parameters:
+ * - period: from 0s to 1.6s
+ * - duty cycle: percentage of the period the led is on, from 0 to 100
+ *
+ * The LP3944 can be found on the Motorola A910 smartphone, where it drives
+ * the rgb leds, the camera flash light and the display backlights.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds-lp3944.h>
+
+/* Read Only Registers */
+#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */
+#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */
+
+#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */
+#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */
+#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */
+#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */
+#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */
+#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */
+
+/* These registers are not used to control leds in the LP3944; they can store
+ * arbitrary values, which the chip will ignore.
+ */
+#define LP3944_REG_REGISTER8 0x08
+#define LP3944_REG_REGISTER9 0x09
+
+#define LP3944_DIM0 0
+#define LP3944_DIM1 1
+
+/* period in ms */
+#define LP3944_PERIOD_MIN 0
+#define LP3944_PERIOD_MAX 1600
+
+/* duty cycle is a percentage */
+#define LP3944_DUTY_CYCLE_MIN 0
+#define LP3944_DUTY_CYCLE_MAX 100
+
+#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)
+
+/* Saved data */
+struct lp3944_led_data {
+ u8 id;
+ enum lp3944_type type;
+ enum lp3944_status status;
+ struct led_classdev ldev;
+ struct i2c_client *client;
+ struct work_struct work;
+};
+
+struct lp3944_data {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct lp3944_led_data leds[LP3944_LEDS_MAX];
+};
+
+static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
+{
+ int tmp;
+
+ tmp = i2c_smbus_read_byte_data(client, reg);
+ if (tmp < 0)
+ return -EINVAL;
+
+ *value = tmp;
+
+ return 0;
+}
+
+static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+/**
+ * Set the period for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @period: period of a blink, that is, an on/off cycle, expressed in ms.
+ */
+static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
+{
+ u8 psc_reg;
+ u8 psc_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ psc_reg = LP3944_REG_PSC0;
+ else if (dim == LP3944_DIM1)
+ psc_reg = LP3944_REG_PSC1;
+ else
+ return -EINVAL;
+
+ /* Convert period to Prescaler value */
+ if (period > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ psc_value = (period * 255) / LP3944_PERIOD_MAX;
+
+ err = lp3944_reg_write(client, psc_reg, psc_value);
+
+ return err;
+}
+
+/**
+ * Set the duty cycle for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @duty_cycle: percentage of a period during which a led is ON
+ */
+static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
+ u8 duty_cycle)
+{
+ u8 pwm_reg;
+ u8 pwm_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ pwm_reg = LP3944_REG_PWM0;
+ else if (dim == LP3944_DIM1)
+ pwm_reg = LP3944_REG_PWM1;
+ else
+ return -EINVAL;
+
+ /* Convert duty cycle to PWM value */
+ if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
+ return -EINVAL;
+
+ pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
+
+ err = lp3944_reg_write(client, pwm_reg, pwm_value);
+
+ return err;
+}
+
+/**
+ * Set the led status
+ *
+ * @led: a lp3944_led_data structure
+ * @status: one of LP3944_LED_STATUS_OFF
+ * LP3944_LED_STATUS_ON
+ * LP3944_LED_STATUS_DIM0
+ * LP3944_LED_STATUS_DIM1
+ */
+static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
+{
+ struct lp3944_data *data = i2c_get_clientdata(led->client);
+ u8 id = led->id;
+ u8 reg;
+ u8 val = 0;
+ int err;
+
+ dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
+ __func__, led->ldev.name, status);
+
+ switch (id) {
+ case LP3944_LED0:
+ case LP3944_LED1:
+ case LP3944_LED2:
+ case LP3944_LED3:
+ reg = LP3944_REG_LS0;
+ break;
+ case LP3944_LED4:
+ case LP3944_LED5:
+ case LP3944_LED6:
+ case LP3944_LED7:
+ id -= LP3944_LED4;
+ reg = LP3944_REG_LS1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (status > LP3944_LED_STATUS_DIM1)
+ return -EINVAL;
+
+	/* invert only 0 and 1, leaving the other values unchanged;
+	 * remember we are abusing status to set blink patterns
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
+ status = 1 - status;
+
+ mutex_lock(&data->lock);
+ lp3944_reg_read(led->client, reg, &val);
+
+ val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
+ val |= (status << (id << 1));
+
+ dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
+ __func__, led->ldev.name, reg, id, status, val);
+
+ /* set led status */
+ err = lp3944_reg_write(led->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int lp3944_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+ u16 period;
+ u8 duty_cycle;
+ int err;
+
+ /* units are in ms */
+ if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+		/* Special case: the leds subsystem requires a default
+		 * user-friendly blink pattern for the LED. Let's blink the led
+		 * slowly (1 Hz).
+ */
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ period = (*delay_on) + (*delay_off);
+
+ /* duty_cycle is the percentage of period during which the led is ON */
+ duty_cycle = 100 * (*delay_on) / period;
+
+	/* invert duty cycle for inverted leds; this has the same effect as
+	 * swapping delay_on and delay_off
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED)
+ duty_cycle = 100 - duty_cycle;
+
+	/* NOTE: always using the first DIM mode means that all leds
+	 * will have the same blinking pattern.
+ *
+ * We could find a way later to have two leds blinking in hardware
+ * with different patterns at the same time, falling back to software
+ * control for the other ones.
+ */
+ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
+ if (err)
+ return err;
+
+ err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
+ if (err)
+ return err;
+
+ dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
+ __func__);
+
+ led->status = LP3944_LED_STATUS_DIM0;
+ schedule_work(&led->work);
+
+ return 0;
+}
+
+static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+
+ dev_dbg(&led->client->dev, "%s: %s, %d\n",
+ __func__, led_cdev->name, brightness);
+
+ led->status = brightness;
+ schedule_work(&led->work);
+}
+
+static void lp3944_led_work(struct work_struct *work)
+{
+ struct lp3944_led_data *led;
+
+ led = container_of(work, struct lp3944_led_data, work);
+ lp3944_led_set(led, led->status);
+}
+
+static int lp3944_configure(struct i2c_client *client,
+ struct lp3944_data *data,
+ struct lp3944_platform_data *pdata)
+{
+ int i, err = 0;
+
+ for (i = 0; i < pdata->leds_size; i++) {
+ struct lp3944_led *pled = &pdata->leds[i];
+ struct lp3944_led_data *led = &data->leds[i];
+ led->client = client;
+ led->id = i;
+
+ switch (pled->type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led->type = pled->type;
+ led->status = pled->status;
+ led->ldev.name = pled->name;
+ led->ldev.max_brightness = 1;
+ led->ldev.brightness_set = lp3944_led_set_brightness;
+ led->ldev.blink_set = lp3944_led_set_blink;
+ led->ldev.flags = LED_CORE_SUSPENDRESUME;
+
+ INIT_WORK(&led->work, lp3944_led_work);
+ err = led_classdev_register(&client->dev, &led->ldev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led->ldev.name);
+ goto exit;
+ }
+
+ /* to expose the default value to userspace */
+ led->ldev.brightness = led->status;
+
+ /* Set the default led status */
+ err = lp3944_led_set(led, led->status);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "%s couldn't set STATUS %d\n",
+ led->ldev.name, led->status);
+ goto exit;
+ }
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+
+ }
+ }
+ return 0;
+
+exit:
+ if (i > 0)
+ for (i = i - 1; i >= 0; i--)
+ switch (pdata->leds[i].type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int __devinit lp3944_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
+ struct lp3944_data *data;
+
+ if (lp3944_pdata == NULL) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ /* Let's see whether this adapter can support what we need. */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "insufficient functionality!\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->lock);
+
+ dev_info(&client->dev, "lp3944 enabled\n");
+
+ lp3944_configure(client, data, lp3944_pdata);
+ return 0;
+}
+
+static int __devexit lp3944_remove(struct i2c_client *client)
+{
+ struct lp3944_platform_data *pdata = client->dev.platform_data;
+ struct lp3944_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->leds_size; i++)
+ switch (data->leds[i].type) {
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+/* lp3944 i2c driver struct */
+static const struct i2c_device_id lp3944_id[] = {
+ {"lp3944", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lp3944_id);
+
+static struct i2c_driver lp3944_driver = {
+ .driver = {
+ .name = "lp3944",
+ },
+ .probe = lp3944_probe,
+ .remove = __devexit_p(lp3944_remove),
+ .id_table = lp3944_id,
+};
+
+static int __init lp3944_module_init(void)
+{
+ return i2c_add_driver(&lp3944_driver);
+}
+
+static void __exit lp3944_module_exit(void)
+{
+ i2c_del_driver(&lp3944_driver);
+}
+
+module_init(lp3944_module_init);
+module_exit(lp3944_module_exit);
+
+MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
+MODULE_DESCRIPTION("LP3944 Fun Light Chip");
+MODULE_LICENSE("GPL");
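The probe routine above refuses to bind without platform data, so board code has to describe each LP3944 output up front. Below is a minimal board-code sketch, not part of the patch: the field names (leds, leds_size, name, type, status) follow their usage in lp3944_configure() above, while the header path, the embedded-array layout of .leds, the 0x60 address and the initial status value 0 are assumptions made for illustration.

#include <linux/i2c.h>
#include <linux/leds-lp3944.h>	/* assumed location of the platform-data structs */

static struct lp3944_platform_data board_lp3944_pdata = {
	.leds = {
		[0] = {
			.name	= "board:green:status",
			.type	= LP3944_LED_TYPE_LED,
			.status	= 0,	/* assumed: 0 == initially off */
		},
		[1] = {
			.name	= "board:red:fault",
			.type	= LP3944_LED_TYPE_LED_INVERTED,
			.status	= 0,
		},
		/* pins 2..7 are not described; leds_size limits the loop above */
	},
	.leds_size = 2,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("lp3944", 0x60),	/* address is board specific */
		.platform_data = &board_lp3944_pdata,
	},
};

Registering this with i2c_register_board_info() for the right bus is what eventually hands the device to lp3944_probe().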
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 3937244fdcab..dba8921240f2 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -35,7 +35,7 @@ struct pca9532_data {
struct pca9532_led leds[16];
struct mutex update_lock;
struct input_dev *idev;
- struct work_struct work;
+ struct work_struct work;
u8 pwm[2];
u8 psc[2];
};
@@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
if (b > 0xFF)
return -EINVAL;
data->pwm[pwm] = b;
- data->psc[pwm] = blink;
- return 0;
+ data->psc[pwm] = blink;
+ return 0;
}
static int pca9532_setpwm(struct i2c_client *client, int pwm)
{
- struct pca9532_data *data = i2c_get_clientdata(client);
- mutex_lock(&data->update_lock);
+ struct pca9532_data *data = i2c_get_clientdata(client);
+ mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
data->pwm[pwm]);
i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
@@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
led->state = PCA9532_ON;
else {
led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
- err = pca9532_calcpwm(led->client, 0, 0, value);
+ err = pca9532_calcpwm(led->client, 0, 0, value);
if (err)
return; /* XXX: led api doesn't allow error code? */
}
- schedule_work(&led->work);
+ schedule_work(&led->work);
}
static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
struct pca9532_led *led = ldev_to_led(led_cdev);
struct i2c_client *client = led->client;
int psc;
- int err = 0;
+ int err = 0;
if (*delay_on == 0 && *delay_off == 0) {
/* led subsystem ask us for a blink rate */
@@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
/* Thecus specific: only use PSC/PWM 0 */
psc = (*delay_on * 152-1)/1000;
- err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
- if (err)
- return err;
- schedule_work(&led->work);
- return 0;
+ err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+ if (err)
+ return err;
+ schedule_work(&led->work);
+ return 0;
}
static int pca9532_event(struct input_dev *dev, unsigned int type,
@@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
else
data->pwm[1] = 0;
- schedule_work(&data->work);
+ schedule_work(&data->work);
- return 0;
+ return 0;
}
static void pca9532_input_work(struct work_struct *work)
{
- struct pca9532_data *data;
- data = container_of(work, struct pca9532_data, work);
+ struct pca9532_data *data;
+ data = container_of(work, struct pca9532_data, work);
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
data->pwm[1]);
@@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work)
static void pca9532_led_work(struct work_struct *work)
{
- struct pca9532_led *led;
- led = container_of(work, struct pca9532_led, work);
- if (led->state == PCA9532_PWM0)
- pca9532_setpwm(led->client, 0);
- pca9532_setled(led);
+ struct pca9532_led *led;
+ led = container_of(work, struct pca9532_led, work);
+ if (led->state == PCA9532_PWM0)
+ pca9532_setpwm(led->client, 0);
+ pca9532_setled(led);
}
static int pca9532_configure(struct i2c_client *client,
@@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client,
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set = pca9532_set_brightness;
led->ldev.blink_set = pca9532_set_blink;
- INIT_WORK(&led->work, pca9532_led_work);
+ INIT_WORK(&led->work, pca9532_led_work);
err = led_classdev_register(&client->dev, &led->ldev);
if (err < 0) {
dev_err(&client->dev,
@@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
BIT_MASK(SND_TONE);
data->idev->event = pca9532_event;
input_set_drvdata(data->idev, data);
- INIT_WORK(&data->work, pca9532_input_work);
+ INIT_WORK(&data->work, pca9532_input_work);
err = input_register_device(data->idev);
if (err) {
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
goto exit;
}
@@ -283,13 +283,13 @@ exit:
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
@@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d4e8979735cb..9c3138265f8e 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -82,7 +82,7 @@ struct lg_cpu {
struct lg_eventfd {
unsigned long addr;
- struct file *event;
+ struct eventfd_ctx *event;
};
struct lg_eventfd_map {
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 32e297121058..9f9a2953b383 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
/* Now append new entry. */
new->map[new->num].addr = addr;
- new->map[new->num].event = eventfd_fget(fd);
+ new->map[new->num].event = eventfd_ctx_fdget(fd);
if (IS_ERR(new->map[new->num].event)) {
kfree(new);
return PTR_ERR(new->map[new->num].event);
@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file)
/* Release any eventfds they registered. */
for (i = 0; i < lg->eventfds->num; i++)
- fput(lg->eventfds->map[i].event);
+ eventfd_ctx_put(lg->eventfds->map[i].event);
kfree(lg->eventfds);
/* If lg->dead doesn't contain an error code it will be NULL or a
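The two lguest hunks above stop pinning the eventfd's struct file (eventfd_fget()/fput()) and pin only its context instead. A rough, self-contained sketch of that lifecycle, assuming eventfd_signal() as the usual way such a context is later kicked; the surrounding function and variable names are made up:

#include <linux/eventfd.h>
#include <linux/err.h>

static struct eventfd_ctx *example_ctx;		/* illustrative storage */

static int example_attach(int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	example_ctx = ctx;	/* holds a context reference, not a struct file */
	return 0;
}

static void example_kick(void)
{
	if (example_ctx)
		eventfd_signal(example_ctx, 1);	/* wake any userspace waiter */
}

static void example_detach(void)
{
	eventfd_ctx_put(example_ctx);		/* drop the reference from attach */
	example_ctx = NULL;
}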
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 6e149f4a1fff..a0f68386c12f 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -378,6 +378,17 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
+#ifdef CONFIG_PCI
+ /* Set the DMA ops to the ones from the PCI device; this would be
+ * fishy if we didn't know that on PowerMac it's always either the
+ * direct ops or the iommu ops, both of which work fine
+ */
+ dev->ofdev.dev.archdata.dma_ops =
+ chip->lbus.pdev->dev.archdata.dma_ops;
+ dev->ofdev.dev.archdata.dma_data =
+ chip->lbus.pdev->dev.archdata.dma_data;
+#endif /* CONFIG_PCI */
+
#ifdef DEBUG
printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 36e0675be9f7..020f9573fd82 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -231,6 +231,17 @@ config DM_MIRROR
Allow volume managers to mirror logical volumes, also
needed for live data migration tools such as 'pvmove'.
+config DM_LOG_USERSPACE
+ tristate "Mirror userspace logging (EXPERIMENTAL)"
+ depends on DM_MIRROR && EXPERIMENTAL && NET
+ select CONNECTOR
+ ---help---
+ The userspace logging module provides a mechanism for
+ relaying the dm-dirty-log API to userspace. Log designs
+ which are more suited to userspace implementation (e.g.
+ shared storage logs) or experimental logs can be implemented
+ by leveraging this framework.
+
config DM_ZERO
tristate "Zero target"
depends on BLK_DEV_DM
@@ -249,6 +260,25 @@ config DM_MULTIPATH
---help---
Allow volume managers to support multipath hardware.
+config DM_MULTIPATH_QL
+ tristate "I/O Path Selector based on the number of in-flight I/Os"
+ depends on DM_MULTIPATH
+ ---help---
+ This path selector is a dynamic load balancer which selects
+ the path with the least number of in-flight I/Os.
+
+ If unsure, say N.
+
+config DM_MULTIPATH_ST
+ tristate "I/O Path Selector based on the service time"
+ depends on DM_MULTIPATH
+ ---help---
+ This path selector is a dynamic load balancer which selects
+ the path expected to complete the incoming I/O in the shortest
+ time.
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 45cc5951d928..1dc4185bd781 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -8,6 +8,8 @@ dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
dm-mirror-y += dm-raid1.o
+dm-log-userspace-y \
+ += dm-log-userspace-base.o dm-log-userspace-transfer.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
@@ -36,8 +38,11 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
+obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
+obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
+obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53394e863c74..529e2ba505c3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -776,7 +776,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
* But don't wait if split was due to the io size restriction
*/
if (unlikely(out_of_pages))
- congestion_wait(WRITE, HZ/100);
+ congestion_wait(BLK_RW_ASYNC, HZ/100);
/*
* With async crypto it is unsafe to share the crypto context
@@ -1132,6 +1132,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_crypt_queue;
}
+ ti->num_flush_requests = 1;
ti->private = cc;
return 0;
@@ -1189,6 +1190,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct dm_crypt_io *io;
+ struct crypt_config *cc;
+
+ if (unlikely(bio_empty_barrier(bio))) {
+ cc = ti->private;
+ bio->bi_bdev = cc->dev->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
@@ -1305,9 +1313,17 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static int crypt_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct crypt_config *cc = ti->private;
+
+ return fn(ti, cc->dev, cc->start, data);
+}
+
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
@@ -1318,6 +1334,7 @@ static struct target_type crypt_target = {
.resume = crypt_resume,
.message = crypt_message,
.merge = crypt_merge,
+ .iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)
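crypt_iterate_devices() above, like the equivalents added to dm-delay and dm-linear further down, simply funnels a caller-supplied callout of the shape fn(ti, dev, start, data) over each underlying device. A hedged sketch of such a callout from the caller's side; the function and counter are hypothetical, and the "non-zero return stops the iteration" convention is the one visible in delay_iterate_devices() below:

#include <linux/device-mapper.h>

/* Hypothetical callout: count the devices backing a target. */
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, void *data)
{
	unsigned *count = data;

	(*count)++;
	return 0;		/* non-zero would stop the iteration */
}

A caller would then do something like: unsigned n = 0; if (ti->type->iterate_devices) ti->type->iterate_devices(ti, count_device, &n);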
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 559dbb52bc85..4e5b843cd4d7 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -197,6 +197,7 @@ out:
mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
+ ti->num_flush_requests = 1;
ti->private = dc;
return 0;
@@ -278,8 +279,9 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
- bio->bi_sector = dc->start_write +
- (bio->bi_sector - ti->begin);
+ if (bio_sectors(bio))
+ bio->bi_sector = dc->start_write +
+ (bio->bi_sector - ti->begin);
return delay_bio(dc, dc->write_delay, bio);
}
@@ -316,9 +318,26 @@ static int delay_status(struct dm_target *ti, status_type_t type,
return 0;
}
+static int delay_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct delay_c *dc = ti->private;
+ int ret = 0;
+
+ ret = fn(ti, dc->dev_read, dc->start_read, data);
+ if (ret)
+ goto out;
+
+ if (dc->dev_write)
+ ret = fn(ti, dc->dev_write, dc->start_write, data);
+
+out:
+ return ret;
+}
+
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 0, 2},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
@@ -326,6 +345,7 @@ static struct target_type delay_target = {
.presuspend = delay_presuspend,
.resume = delay_resume,
.status = delay_status,
+ .iterate_devices = delay_iterate_devices,
};
static int __init dm_delay_init(void)
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 75d8081a9041..3710ff88fc10 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_exception_store **store)
{
int r = 0;
- struct dm_exception_store_type *type;
+ struct dm_exception_store_type *type = NULL;
struct dm_exception_store *tmp_store;
char persistent;
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
}
persistent = toupper(*argv[1]);
- if (persistent != 'P' && persistent != 'N') {
+ if (persistent == 'P')
+ type = get_type("P");
+ else if (persistent == 'N')
+ type = get_type("N");
+ else {
ti->error = "Persistent flag is not P or N";
return -EINVAL;
}
- type = get_type(argv[1]);
if (!type) {
ti->error = "Exception store type not recognised";
r = -EINVAL;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index c92701dc5001..2442c8c07898 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -156,7 +156,7 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
*/
static inline sector_t get_dev_size(struct block_device *bdev)
{
- return bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index e73aabd61cd7..3a2e6a2f8bdd 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -22,6 +22,7 @@ struct dm_io_client {
/* FIXME: can we shrink this ? */
struct io {
unsigned long error_bits;
+ unsigned long eopnotsupp_bits;
atomic_t count;
struct task_struct *sleeper;
struct dm_io_client *client;
@@ -107,8 +108,11 @@ static inline unsigned bio_get_region(struct bio *bio)
*---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
- if (error)
+ if (error) {
set_bit(region, &io->error_bits);
+ if (error == -EOPNOTSUPP)
+ set_bit(region, &io->eopnotsupp_bits);
+ }
if (atomic_dec_and_test(&io->count)) {
if (io->sleeper)
@@ -360,7 +364,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
return -EIO;
}
+retry:
io.error_bits = 0;
+ io.eopnotsupp_bits = 0;
atomic_set(&io.count, 1); /* see dispatch_io() */
io.sleeper = current;
io.client = client;
@@ -377,6 +383,11 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
set_current_state(TASK_RUNNING);
+ if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
+ rw &= ~(1 << BIO_RW_BARRIER);
+ goto retry;
+ }
+
if (error_bits)
*error_bits = io.error_bits;
@@ -397,6 +408,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io = mempool_alloc(client->pool, GFP_NOIO);
io->error_bits = 0;
+ io->eopnotsupp_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->sleeper = NULL;
io->client = client;
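The eopnotsupp_bits tracking added above only changes synchronous barrier requests: if a region fails with -EOPNOTSUPP, sync_io() strips BIO_RW_BARRIER and retries the request without the barrier flag. A minimal sketch of a caller that relies on this, assuming the dm-io request/region field names from include/linux/dm-io.h (bi_rw, mem.type, mem.ptr.addr, notify.fn, client); the helper itself is hypothetical:

#include <linux/dm-io.h>
#include <linux/bio.h>
#include <linux/fs.h>

static int write_header_with_barrier(struct dm_io_client *client,
				     struct block_device *bdev,
				     void *buf, sector_t sectors)
{
	struct dm_io_region where = {
		.bdev	= bdev,
		.sector	= 0,
		.count	= sectors,
	};
	struct dm_io_request req = {
		.bi_rw		= WRITE | (1 << BIO_RW_BARRIER),
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= buf,
		.notify.fn	= NULL,		/* NULL notify => synchronous */
		.client		= client,
	};
	unsigned long error_bits = 0;

	/* On -EOPNOTSUPP the write is transparently retried without the barrier. */
	return dm_io(&req, 1, &where, &error_bits);
}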
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1128d3fba797..7f77f18fcafa 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -276,7 +276,7 @@ retry:
up_write(&_hash_lock);
}
-static int dm_hash_rename(const char *old, const char *new)
+static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
@@ -333,7 +333,7 @@ static int dm_hash_rename(const char *old, const char *new)
dm_table_put(table);
}
- dm_kobject_uevent(hc->md);
+ dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
dm_put(hc->md);
up_write(&_hash_lock);
@@ -680,6 +680,9 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
__hash_remove(hc);
up_write(&_hash_lock);
+
+ dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
+
dm_put(md);
param->data_size = 0;
return 0;
@@ -715,7 +718,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
return r;
param->data_size = 0;
- return dm_hash_rename(param->name, new_name);
+ return dm_hash_rename(param->event_nr, param->name, new_name);
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -842,8 +845,11 @@ static int do_resume(struct dm_ioctl *param)
if (dm_suspended(md))
r = dm_resume(md);
- if (!r)
+
+ if (!r) {
+ dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
r = __dev_status(md, param);
+ }
dm_put(md);
return r;
@@ -1044,6 +1050,12 @@ static int populate_table(struct dm_table *table,
next = spec->next;
}
+ r = dm_table_set_type(table);
+ if (r) {
+ DMWARN("unable to set table type");
+ return r;
+ }
+
return dm_table_complete(table);
}
@@ -1089,6 +1101,13 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto out;
}
+ r = dm_table_alloc_md_mempools(t);
+ if (r) {
+ DMWARN("unable to allocate mempools for this table");
+ dm_table_destroy(t);
+ goto out;
+ }
+
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 79fb53e51c70..9184b6deb868 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
+ ti->num_flush_requests = 1;
ti->private = lc;
return 0;
@@ -81,7 +82,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
struct linear_c *lc = ti->private;
bio->bi_bdev = lc->dev->bdev;
- bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+ if (bio_sectors(bio))
+ bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio,
@@ -132,9 +134,17 @@ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static int linear_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct linear_c *lc = ti->private;
+
+ return fn(ti, lc->dev, lc->start, data);
+}
+
static struct target_type linear_target = {
.name = "linear",
- .version= {1, 0, 3},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
@@ -142,6 +152,7 @@ static struct target_type linear_target = {
.status = linear_status,
.ioctl = linear_ioctl,
.merge = linear_merge,
+ .iterate_devices = linear_iterate_devices,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
new file mode 100644
index 000000000000..e69b96560997
--- /dev/null
+++ b/drivers/md/dm-log-userspace-base.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#include <linux/bio.h>
+#include <linux/dm-dirty-log.h>
+#include <linux/device-mapper.h>
+#include <linux/dm-log-userspace.h>
+
+#include "dm-log-userspace-transfer.h"
+
+struct flush_entry {
+ int type;
+ region_t region;
+ struct list_head list;
+};
+
+struct log_c {
+ struct dm_target *ti;
+ uint32_t region_size;
+ region_t region_count;
+ char uuid[DM_UUID_LEN];
+
+ char *usr_argv_str;
+ uint32_t usr_argc;
+
+ /*
+ * in_sync_hint gets set when doing is_remote_recovering. It
+ * represents the first region that needs recovery. IOW, the
+ * first zero bit of sync_bits. This can be useful to limit
+ * traffic for calls like is_remote_recovering and get_resync_work,
+ * but take care in its use for anything else.
+ */
+ uint64_t in_sync_hint;
+
+ spinlock_t flush_lock;
+ struct list_head flush_list; /* only for clear and mark requests */
+};
+
+static mempool_t *flush_entry_pool;
+
+static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ return kmalloc(sizeof(struct flush_entry), gfp_mask);
+}
+
+static void flush_entry_free(void *element, void *pool_data)
+{
+ kfree(element);
+}
+
+static int userspace_do_request(struct log_c *lc, const char *uuid,
+ int request_type, char *data, size_t data_size,
+ char *rdata, size_t *rdata_size)
+{
+ int r;
+
+ /*
+ * If the server isn't there, -ESRCH is returned,
+ * and we must keep trying until the server is
+ * restored.
+ */
+retry:
+ r = dm_consult_userspace(uuid, request_type, data,
+ data_size, rdata, rdata_size);
+
+ if (r != -ESRCH)
+ return r;
+
+ DMERR(" Userspace log server not found.");
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2*HZ);
+ DMWARN("Attempting to contact userspace log server...");
+ r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+ strlen(lc->usr_argv_str) + 1,
+ NULL, NULL);
+ if (!r)
+ break;
+ }
+ DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
+ r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+ 0, NULL, NULL);
+ if (!r)
+ goto retry;
+
+ DMERR("Error trying to resume userspace log: %d", r);
+
+ return -ESRCH;
+}
+
+static int build_constructor_string(struct dm_target *ti,
+ unsigned argc, char **argv,
+ char **ctr_str)
+{
+ int i, str_size;
+ char *str = NULL;
+
+ *ctr_str = NULL;
+
+ for (i = 0, str_size = 0; i < argc; i++)
+ str_size += strlen(argv[i]) + 1; /* +1 for space between args */
+
+ str_size += 20; /* Max number of chars in a printed u64 number */
+
+ str = kzalloc(str_size, GFP_KERNEL);
+ if (!str) {
+ DMWARN("Unable to allocate memory for constructor string");
+ return -ENOMEM;
+ }
+
+ for (i = 0, str_size = 0; i < argc; i++)
+ str_size += sprintf(str + str_size, "%s ", argv[i]);
+ str_size += sprintf(str + str_size, "%llu",
+ (unsigned long long)ti->len);
+
+ *ctr_str = str;
+ return str_size;
+}
+
+/*
+ * userspace_ctr
+ *
+ * argv contains:
+ * <UUID> <other args>
+ * Where 'other args' is the userspace implementation specific log
+ * arguments. An example might be:
+ * <UUID> clustered_disk <arg count> <log dev> <region_size> [[no]sync]
+ *
+ * So, this module will strip off the <UUID> for identification purposes
+ * when communicating with userspace about a log; but will pass on everything
+ * else.
+ */
+static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
+ unsigned argc, char **argv)
+{
+ int r = 0;
+ int str_size;
+ char *ctr_str = NULL;
+ struct log_c *lc = NULL;
+ uint64_t rdata;
+ size_t rdata_size = sizeof(rdata);
+
+ if (argc < 3) {
+ DMWARN("Too few arguments to userspace dirty log");
+ return -EINVAL;
+ }
+
+ lc = kmalloc(sizeof(*lc), GFP_KERNEL);
+ if (!lc) {
+ DMWARN("Unable to allocate userspace log context.");
+ return -ENOMEM;
+ }
+
+ lc->ti = ti;
+
+ if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
+ DMWARN("UUID argument too long.");
+ kfree(lc);
+ return -EINVAL;
+ }
+
+ strncpy(lc->uuid, argv[0], DM_UUID_LEN);
+ spin_lock_init(&lc->flush_lock);
+ INIT_LIST_HEAD(&lc->flush_list);
+
+ str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
+ if (str_size < 0) {
+ kfree(lc);
+ return str_size;
+ }
+
+ /* Send table string */
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+ ctr_str, str_size, NULL, NULL);
+
+ if (r == -ESRCH) {
+ DMERR("Userspace log server not found");
+ goto out;
+ }
+
+ /* Since the region size does not change, get it now */
+ rdata_size = sizeof(rdata);
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+ NULL, 0, (char *)&rdata, &rdata_size);
+
+ if (r) {
+ DMERR("Failed to get region size of dirty log");
+ goto out;
+ }
+
+ lc->region_size = (uint32_t)rdata;
+ lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
+
+out:
+ if (r) {
+ kfree(lc);
+ kfree(ctr_str);
+ } else {
+ lc->usr_argv_str = ctr_str;
+ lc->usr_argc = argc;
+ log->context = lc;
+ }
+
+ return r;
+}
+
+static void userspace_dtr(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+ NULL, 0,
+ NULL, NULL);
+
+ kfree(lc->usr_argv_str);
+ kfree(lc);
+
+ return;
+}
+
+static int userspace_presuspend(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static int userspace_postsuspend(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static int userspace_resume(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ lc->in_sync_hint = 0;
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
+{
+ struct log_c *lc = log->context;
+
+ return lc->region_size;
+}
+
+/*
+ * userspace_is_clean
+ *
+ * Check whether a region is clean. If there is any sort of
+ * failure when consulting the server, we return not clean.
+ *
+ * Returns: 1 if clean, 0 otherwise
+ */
+static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
+{
+ int r;
+ uint64_t region64 = (uint64_t)region;
+ int64_t is_clean;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+
+ rdata_size = sizeof(is_clean);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
+ (char *)&region64, sizeof(region64),
+ (char *)&is_clean, &rdata_size);
+
+ return (r) ? 0 : (int)is_clean;
+}
+
+/*
+ * userspace_in_sync
+ *
+ * Check if the region is in-sync. If there is any sort
+ * of failure when consulting the server, we assume that
+ * the region is not in sync.
+ *
+ * If 'can_block' is not set, return -EWOULDBLOCK immediately.
+ *
+ * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
+ */
+static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
+ int can_block)
+{
+ int r;
+ uint64_t region64 = region;
+ int64_t in_sync;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+
+ /*
+ * We can never respond directly - even if in_sync_hint is
+ * set. This is because another machine could see a device
+ * failure and mark the region out-of-sync. If we don't go
+ * to userspace to ask, we might think the region is in-sync
+ * and allow a read to pick up data that is stale. (This is
+ * very unlikely if a device actually fails; but it is very
+ * likely if a connection to one device from one machine fails.)
+ *
+ * There still might be a problem if the mirror caches the region
+ * state as in-sync... but then this call would not be made. So,
+ * that is a mirror problem.
+ */
+ if (!can_block)
+ return -EWOULDBLOCK;
+
+ rdata_size = sizeof(in_sync);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
+ (char *)&region64, sizeof(region64),
+ (char *)&in_sync, &rdata_size);
+ return (r) ? 0 : (int)in_sync;
+}
+
+/*
+ * userspace_flush
+ *
+ * This function is ok to block.
+ * The flush happens in two stages. First, it sends all
+ * clear/mark requests that are on the list. Then it
+ * tells the server to commit them. This gives the
+ * server a chance to optimise the commit, instead of
+ * doing it for every request.
+ *
+ * Additionally, we could implement another thread that
+ * sends the requests up to the server - reducing the
+ * load on flush. Then the flush would have less in
+ * the list and be responsible for the finishing commit.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+static int userspace_flush(struct dm_dirty_log *log)
+{
+ int r = 0;
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ LIST_HEAD(flush_list);
+ struct flush_entry *fe, *tmp_fe;
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ list_splice_init(&lc->flush_list, &flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ if (list_empty(&flush_list))
+ return 0;
+
+ /*
+ * FIXME: Count up requests, group request types,
+ * allocate memory to stick all requests in and
+ * send to server in one go. Failing the allocation,
+ * do it one by one.
+ */
+
+ list_for_each_entry(fe, &flush_list, list) {
+ r = userspace_do_request(lc, lc->uuid, fe->type,
+ (char *)&fe->region,
+ sizeof(fe->region),
+ NULL, NULL);
+ if (r)
+ goto fail;
+ }
+
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
+ NULL, 0, NULL, NULL);
+
+fail:
+ /*
+ * We can safely remove these entries, even on failure.
+ * Calling code will receive an error and will know that
+ * the log facility has failed.
+ */
+ list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
+ list_del(&fe->list);
+ mempool_free(fe, flush_entry_pool);
+ }
+
+ if (r)
+ dm_table_event(lc->ti->table);
+
+ return r;
+}
+
+/*
+ * userspace_mark_region
+ *
+ * This function should avoid blocking unless absolutely required.
+ * (Memory allocation is valid for blocking.)
+ */
+static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
+{
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ struct flush_entry *fe;
+
+ /* Wait for an allocation, but _never_ fail */
+ fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
+ BUG_ON(!fe);
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ fe->type = DM_ULOG_MARK_REGION;
+ fe->region = region;
+ list_add(&fe->list, &lc->flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ return;
+}
+
+/*
+ * userspace_clear_region
+ *
+ * This function must not block.
+ * So, the alloc can't block. In the worst case, it is ok to
+ * fail. It would simply mean we can't clear the region.
+ * Does nothing to current sync context, but does mean
+ * the region will be re-sync'ed on a reload of the mirror
+ * even though it is in-sync.
+ */
+static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
+{
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ struct flush_entry *fe;
+
+ /*
+ * If we fail to allocate, we skip the clearing of
+ * the region. This doesn't hurt us in any way, except
+ * to cause the region to be resync'ed when the
+ * device is activated next time.
+ */
+ fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
+ if (!fe) {
+ DMERR("Failed to allocate memory to clear region.");
+ return;
+ }
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ fe->type = DM_ULOG_CLEAR_REGION;
+ fe->region = region;
+ list_add(&fe->list, &lc->flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ return;
+}
+
+/*
+ * userspace_get_resync_work
+ *
+ * Get a region that needs recovery. It is valid to return
+ * an error for this function.
+ *
+ * Returns: 1 if region filled, 0 if no work, <0 on error
+ */
+static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
+{
+ int r;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+ struct {
+ int64_t i; /* 64-bit for mixed-arch compatibility */
+ region_t r;
+ } pkg;
+
+ if (lc->in_sync_hint >= lc->region_count)
+ return 0;
+
+ rdata_size = sizeof(pkg);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
+ NULL, 0,
+ (char *)&pkg, &rdata_size);
+
+ *region = pkg.r;
+ return (r) ? r : (int)pkg.i;
+}
+
+/*
+ * userspace_set_region_sync
+ *
+ * Set the sync status of a given region. This function
+ * must not fail.
+ */
+static void userspace_set_region_sync(struct dm_dirty_log *log,
+ region_t region, int in_sync)
+{
+ int r;
+ struct log_c *lc = log->context;
+ struct {
+ region_t r;
+ int64_t i;
+ } pkg;
+
+ pkg.r = region;
+ pkg.i = (int64_t)in_sync;
+
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
+ (char *)&pkg, sizeof(pkg),
+ NULL, NULL);
+
+ /*
+ * It would be nice to be able to report failures.
+ * However, it is easy enough to detect and resolve.
+ */
+ return;
+}
+
+/*
+ * userspace_get_sync_count
+ *
+ * If there is any sort of failure when consulting the server,
+ * we assume that the sync count is zero.
+ *
+ * Returns: sync count on success, 0 on failure
+ */
+static region_t userspace_get_sync_count(struct dm_dirty_log *log)
+{
+ int r;
+ size_t rdata_size;
+ uint64_t sync_count;
+ struct log_c *lc = log->context;
+
+ rdata_size = sizeof(sync_count);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
+ NULL, 0,
+ (char *)&sync_count, &rdata_size);
+
+ if (r)
+ return 0;
+
+ if (sync_count >= lc->region_count)
+ lc->in_sync_hint = lc->region_count;
+
+ return (region_t)sync_count;
+}
+
+/*
+ * userspace_status
+ *
+ * Returns: amount of space consumed
+ */
+static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
+ char *result, unsigned maxlen)
+{
+ int r = 0;
+ size_t sz = (size_t)maxlen;
+ struct log_c *lc = log->context;
+
+ switch (status_type) {
+ case STATUSTYPE_INFO:
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
+ NULL, 0,
+ result, &sz);
+
+ if (r) {
+ sz = 0;
+ DMEMIT("%s 1 COM_FAILURE", log->type->name);
+ }
+ break;
+ case STATUSTYPE_TABLE:
+ sz = 0;
+ DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
+ lc->uuid, lc->usr_argv_str);
+ break;
+ }
+ return (r) ? 0 : (int)sz;
+}
+
+/*
+ * userspace_is_remote_recovering
+ *
+ * Returns: 1 if region recovering, 0 otherwise
+ */
+static int userspace_is_remote_recovering(struct dm_dirty_log *log,
+ region_t region)
+{
+ int r;
+ uint64_t region64 = region;
+ struct log_c *lc = log->context;
+ static unsigned long long limit;
+ struct {
+ int64_t is_recovering;
+ uint64_t in_sync_hint;
+ } pkg;
+ size_t rdata_size = sizeof(pkg);
+
+ /*
+ * Once the mirror has been reported to be in-sync,
+ * it will never again ask for recovery work. So,
+ * we can safely say there is not a remote machine
+ * recovering if the device is in-sync. (in_sync_hint
+ * must be reset at resume time.)
+ */
+ if (region < lc->in_sync_hint)
+ return 0;
+ else if (jiffies < limit)
+ return 1;
+
+ limit = jiffies + (HZ / 4);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
+ (char *)&region64, sizeof(region64),
+ (char *)&pkg, &rdata_size);
+ if (r)
+ return 1;
+
+ lc->in_sync_hint = pkg.in_sync_hint;
+
+ return (int)pkg.is_recovering;
+}
+
+static struct dm_dirty_log_type _userspace_type = {
+ .name = "userspace",
+ .module = THIS_MODULE,
+ .ctr = userspace_ctr,
+ .dtr = userspace_dtr,
+ .presuspend = userspace_presuspend,
+ .postsuspend = userspace_postsuspend,
+ .resume = userspace_resume,
+ .get_region_size = userspace_get_region_size,
+ .is_clean = userspace_is_clean,
+ .in_sync = userspace_in_sync,
+ .flush = userspace_flush,
+ .mark_region = userspace_mark_region,
+ .clear_region = userspace_clear_region,
+ .get_resync_work = userspace_get_resync_work,
+ .set_region_sync = userspace_set_region_sync,
+ .get_sync_count = userspace_get_sync_count,
+ .status = userspace_status,
+ .is_remote_recovering = userspace_is_remote_recovering,
+};
+
+static int __init userspace_dirty_log_init(void)
+{
+ int r = 0;
+
+ flush_entry_pool = mempool_create(100, flush_entry_alloc,
+ flush_entry_free, NULL);
+
+ if (!flush_entry_pool) {
+ DMWARN("Unable to create flush_entry_pool: No memory.");
+ return -ENOMEM;
+ }
+
+ r = dm_ulog_tfr_init();
+ if (r) {
+ DMWARN("Unable to initialize userspace log communications");
+ mempool_destroy(flush_entry_pool);
+ return r;
+ }
+
+ r = dm_dirty_log_type_register(&_userspace_type);
+ if (r) {
+ DMWARN("Couldn't register userspace dirty log type");
+ dm_ulog_tfr_exit();
+ mempool_destroy(flush_entry_pool);
+ return r;
+ }
+
+ DMINFO("version 1.0.0 loaded");
+ return 0;
+}
+
+static void __exit userspace_dirty_log_exit(void)
+{
+ dm_dirty_log_type_unregister(&_userspace_type);
+ dm_ulog_tfr_exit();
+ mempool_destroy(flush_entry_pool);
+
+ DMINFO("version 1.0.0 unloaded");
+ return;
+}
+
+module_init(userspace_dirty_log_init);
+module_exit(userspace_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
+MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
new file mode 100644
index 000000000000..0ca1ee768a1f
--- /dev/null
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/connector.h>
+#include <linux/device-mapper.h>
+#include <linux/dm-log-userspace.h>
+
+#include "dm-log-userspace-transfer.h"
+
+static uint32_t dm_ulog_seq;
+
+/*
+ * Netlink/Connector is an unreliable protocol. How long should
+ * we wait for a response before assuming it was lost and retrying?
+ * (If we do receive a response after this time, it will be discarded
+ * and the response to the resent request will be waited for.)
+ */
+#define DM_ULOG_RETRY_TIMEOUT (15 * HZ)
+
+/*
+ * Pre-allocated space for speed
+ */
+#define DM_ULOG_PREALLOCED_SIZE 512
+static struct cn_msg *prealloced_cn_msg;
+static struct dm_ulog_request *prealloced_ulog_tfr;
+
+static struct cb_id ulog_cn_id = {
+ .idx = CN_IDX_DM,
+ .val = CN_VAL_DM_USERSPACE_LOG
+};
+
+static DEFINE_MUTEX(dm_ulog_lock);
+
+struct receiving_pkg {
+ struct list_head list;
+ struct completion complete;
+
+ uint32_t seq;
+
+ int error;
+ size_t *data_size;
+ char *data;
+};
+
+static DEFINE_SPINLOCK(receiving_list_lock);
+static struct list_head receiving_list;
+
+static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
+{
+ int r;
+ struct cn_msg *msg = prealloced_cn_msg;
+
+ memset(msg, 0, sizeof(struct cn_msg));
+
+ msg->id.idx = ulog_cn_id.idx;
+ msg->id.val = ulog_cn_id.val;
+ msg->ack = 0;
+ msg->seq = tfr->seq;
+ msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;
+
+ r = cn_netlink_send(msg, 0, gfp_any());
+
+ return r;
+}
+
+/*
+ * Parameters for this function can be either msg or tfr, but not
+ * both. This function fills in the reply for a waiting request.
+ * If just msg is given, then the reply is simply an ACK from userspace
+ * that the request was received.
+ *
+ * Returns: 0 on success, -ENOENT on failure
+ */
+static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
+{
+ uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0;
+ struct receiving_pkg *pkg;
+
+ /*
+ * The 'receiving_pkg' entries in this list are allocated on
+ * the stack in 'dm_consult_userspace'.
+ * Each process that is waiting for a reply from the user
+ * space server will have an entry in this list.
+ *
+ * We are safe to do it this way because the stack space
+ * is unique to each process, but still addressable by
+ * other processes.
+ */
+ list_for_each_entry(pkg, &receiving_list, list) {
+ if (rtn_seq != pkg->seq)
+ continue;
+
+ if (msg) {
+ pkg->error = -msg->ack;
+ /*
+ * If we are trying again, we will need to know our
+ * storage capacity. Otherwise, along with the
+ * error code, we make explicit that we have no data.
+ */
+ if (pkg->error != -EAGAIN)
+ *(pkg->data_size) = 0;
+ } else if (tfr->data_size > *(pkg->data_size)) {
+ DMERR("Insufficient space to receive package [%u] "
+ "(%u vs %lu)", tfr->request_type,
+ tfr->data_size, *(pkg->data_size));
+
+ *(pkg->data_size) = 0;
+ pkg->error = -ENOSPC;
+ } else {
+ pkg->error = tfr->error;
+ memcpy(pkg->data, tfr->data, tfr->data_size);
+ *(pkg->data_size) = tfr->data_size;
+ }
+ complete(&pkg->complete);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * This is the connector callback that delivers data
+ * that was sent from userspace.
+ */
+static void cn_ulog_callback(void *data)
+{
+ struct cn_msg *msg = (struct cn_msg *)data;
+ struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
+
+ spin_lock(&receiving_list_lock);
+ if (msg->len == 0)
+ fill_pkg(msg, NULL);
+ else if (msg->len < sizeof(*tfr))
+ DMERR("Incomplete message received (expected %u, got %u): [%u]",
+ (unsigned)sizeof(*tfr), msg->len, msg->seq);
+ else
+ fill_pkg(NULL, tfr);
+ spin_unlock(&receiving_list_lock);
+}
+
+/**
+ * dm_consult_userspace
+ * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @request_type: found in include/linux/dm-log-userspace.h
+ * @data: data to tx to the server
+ * @data_size: size of data in bytes
+ * @rdata: place to put return data from server
+ * @rdata_size: value-result (amount of space given/amount of space used)
+ *
+ * rdata_size is undefined on failure.
+ *
+ * Memory used to communicate with userspace is zero'ed
+ * before populating to ensure that no unwanted bits leak
+ * from kernel space to user-space. All userspace log communications
+ * between kernel and user space go through this function.
+ *
+ * Returns: 0 on success, -EXXX on failure
+ **/
+int dm_consult_userspace(const char *uuid, int request_type,
+ char *data, size_t data_size,
+ char *rdata, size_t *rdata_size)
+{
+ int r = 0;
+ size_t dummy = 0;
+ int overhead_size =
+ sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
+ struct dm_ulog_request *tfr = prealloced_ulog_tfr;
+ struct receiving_pkg pkg;
+
+ if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
+ DMINFO("Size of tfr exceeds preallocated size");
+ return -EINVAL;
+ }
+
+ if (!rdata_size)
+ rdata_size = &dummy;
+resend:
+ /*
+ * We serialize the sending of requests so we can
+ * use the preallocated space.
+ */
+ mutex_lock(&dm_ulog_lock);
+
+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
+ memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+ tfr->seq = dm_ulog_seq++;
+
+ /*
+ * Must be valid request type (all other bits set to
+ * zero). This reserves other bits for possible future
+ * use.
+ */
+ tfr->request_type = request_type & DM_ULOG_REQUEST_MASK;
+
+ tfr->data_size = data_size;
+ if (data && data_size)
+ memcpy(tfr->data, data, data_size);
+
+ memset(&pkg, 0, sizeof(pkg));
+ init_completion(&pkg.complete);
+ pkg.seq = tfr->seq;
+ pkg.data_size = rdata_size;
+ pkg.data = rdata;
+ spin_lock(&receiving_list_lock);
+ list_add(&(pkg.list), &receiving_list);
+ spin_unlock(&receiving_list_lock);
+
+ r = dm_ulog_sendto_server(tfr);
+
+ mutex_unlock(&dm_ulog_lock);
+
+ if (r) {
+ DMERR("Unable to send log request [%u] to userspace: %d",
+ request_type, r);
+ spin_lock(&receiving_list_lock);
+ list_del_init(&(pkg.list));
+ spin_unlock(&receiving_list_lock);
+
+ goto out;
+ }
+
+ r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
+ spin_lock(&receiving_list_lock);
+ list_del_init(&(pkg.list));
+ spin_unlock(&receiving_list_lock);
+ if (!r) {
+ DMWARN("[%s] Request timed out: [%u/%u] - retrying",
+ (strlen(uuid) > 8) ?
+ (uuid + (strlen(uuid) - 8)) : (uuid),
+ request_type, pkg.seq);
+ goto resend;
+ }
+
+ r = pkg.error;
+ if (r == -EAGAIN)
+ goto resend;
+
+out:
+ return r;
+}
+
+int dm_ulog_tfr_init(void)
+{
+ int r;
+ void *prealloced;
+
+ INIT_LIST_HEAD(&receiving_list);
+
+ prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL);
+ if (!prealloced)
+ return -ENOMEM;
+
+ prealloced_cn_msg = prealloced;
+ prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg);
+
+ r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
+ if (r) {
+ kfree(prealloced);
+ return r;
+ }
+
+ return 0;
+}
+
+void dm_ulog_tfr_exit(void)
+{
+ cn_del_callback(&ulog_cn_id);
+ kfree(prealloced_cn_msg);
+}
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
new file mode 100644
index 000000000000..c26d8e4e2710
--- /dev/null
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef __DM_LOG_USERSPACE_TRANSFER_H__
+#define __DM_LOG_USERSPACE_TRANSFER_H__
+
+#define DM_MSG_PREFIX "dm-log-userspace"
+
+int dm_ulog_tfr_init(void);
+void dm_ulog_tfr_exit(void);
+int dm_consult_userspace(const char *uuid, int request_type,
+ char *data, size_t data_size,
+ char *rdata, size_t *rdata_size);
+
+#endif /* __DM_LOG_USERSPACE_TRANSFER_H__ */
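A small usage sketch for the interface declared above, mirroring what userspace_ctr() in dm-log-userspace-base.c does to fetch the region size; the wrapper name is hypothetical, and uuid must be the DM_UUID_LEN-sized identifier the log was constructed with:

#include <linux/dm-log-userspace.h>
#include "dm-log-userspace-transfer.h"

static int query_region_size(const char *uuid, uint32_t *region_size)
{
	uint64_t rdata = 0;
	size_t rdata_size = sizeof(rdata);
	int r;

	r = dm_consult_userspace(uuid, DM_ULOG_GET_REGION_SIZE,
				 NULL, 0, (char *)&rdata, &rdata_size);
	if (r)
		return r;

	*region_size = (uint32_t)rdata;
	return 0;
}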
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6fa8ccf91c70..9443896ede07 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -412,11 +412,12 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
/*
* Buffer holds both header and bitset.
*/
- buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
- bitset_size,
- ti->limits.logical_block_size);
+ buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
+ bdev_logical_block_size(lc->header_location.bdev));
- if (buf_size > dev->bdev->bd_inode->i_size) {
+ if (buf_size > i_size_read(dev->bdev->bd_inode)) {
DMWARN("log device %s too small: need %llu bytes",
dev->name, (unsigned long long)buf_size);
kfree(lc);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6a386ab4f7eb..c70604a20897 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -8,7 +8,6 @@
#include <linux/device-mapper.h>
#include "dm-path-selector.h"
-#include "dm-bio-record.h"
#include "dm-uevent.h"
#include <linux/ctype.h>
@@ -35,6 +34,7 @@ struct pgpath {
struct dm_path path;
struct work_struct deactivate_path;
+ struct work_struct activate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -64,8 +64,6 @@ struct multipath {
spinlock_t lock;
const char *hw_handler_name;
- struct work_struct activate_path;
- struct pgpath *pgpath_to_activate;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@@ -84,7 +82,7 @@ struct multipath {
unsigned pg_init_count; /* Number of times pg_init called */
struct work_struct process_queued_ios;
- struct bio_list queued_ios;
+ struct list_head queued_ios;
unsigned queue_size;
struct work_struct trigger_event;
@@ -101,7 +99,7 @@ struct multipath {
*/
struct dm_mpath_io {
struct pgpath *pgpath;
- struct dm_bio_details details;
+ size_t nr_bytes;
};
typedef int (*action_fn) (struct pgpath *pgpath);
@@ -128,6 +126,7 @@ static struct pgpath *alloc_pgpath(void)
if (pgpath) {
pgpath->is_active = 1;
INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+ INIT_WORK(&pgpath->activate_path, activate_path);
}
return pgpath;
@@ -160,7 +159,6 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
- unsigned long flags;
struct pgpath *pgpath, *tmp;
struct multipath *m = ti->private;
@@ -169,10 +167,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
if (m->hw_handler_name)
scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
- spin_lock_irqsave(&m->lock, flags);
- if (m->pgpath_to_activate == pgpath)
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
free_pgpath(pgpath);
}
}
@@ -198,11 +192,11 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
+ INIT_LIST_HEAD(&m->queued_ios);
spin_lock_init(&m->lock);
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
- INIT_WORK(&m->activate_path, activate_path);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -250,11 +244,12 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
m->pg_init_count = 0;
}
-static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
+static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
+ size_t nr_bytes)
{
struct dm_path *path;
- path = pg->ps.type->select_path(&pg->ps, &m->repeat_count);
+ path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
if (!path)
return -ENXIO;
@@ -266,7 +261,7 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
return 0;
}
-static void __choose_pgpath(struct multipath *m)
+static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
struct priority_group *pg;
unsigned bypassed = 1;
@@ -278,12 +273,12 @@ static void __choose_pgpath(struct multipath *m)
if (m->next_pg) {
pg = m->next_pg;
m->next_pg = NULL;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
/* Don't change PG until it has no remaining paths */
- if (m->current_pg && !__choose_path_in_pg(m, m->current_pg))
+ if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
return;
/*
@@ -295,7 +290,7 @@ static void __choose_pgpath(struct multipath *m)
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
} while (bypassed--);
@@ -322,19 +317,21 @@ static int __must_push_back(struct multipath *m)
dm_noflush_suspending(m->ti));
}
-static int map_io(struct multipath *m, struct bio *bio,
+static int map_io(struct multipath *m, struct request *clone,
struct dm_mpath_io *mpio, unsigned was_queued)
{
int r = DM_MAPIO_REMAPPED;
+ size_t nr_bytes = blk_rq_bytes(clone);
unsigned long flags;
struct pgpath *pgpath;
+ struct block_device *bdev;
spin_lock_irqsave(&m->lock, flags);
/* Do we need to select a new pgpath? */
if (!m->current_pgpath ||
(!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
- __choose_pgpath(m);
+ __choose_pgpath(m, nr_bytes);
pgpath = m->current_pgpath;
@@ -344,21 +341,28 @@ static int map_io(struct multipath *m, struct bio *bio,
if ((pgpath && m->queue_io) ||
(!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
- bio_list_add(&m->queued_ios, bio);
+ list_add_tail(&clone->queuelist, &m->queued_ios);
m->queue_size++;
if ((m->pg_init_required && !m->pg_init_in_progress) ||
!m->queue_io)
queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
r = DM_MAPIO_SUBMITTED;
- } else if (pgpath)
- bio->bi_bdev = pgpath->path.dev->bdev;
- else if (__must_push_back(m))
+ } else if (pgpath) {
+ bdev = pgpath->path.dev->bdev;
+ clone->q = bdev_get_queue(bdev);
+ clone->rq_disk = bdev->bd_disk;
+ } else if (__must_push_back(m))
r = DM_MAPIO_REQUEUE;
else
r = -EIO; /* Failed */
mpio->pgpath = pgpath;
+ mpio->nr_bytes = nr_bytes;
+
+ if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
+ pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
+ nr_bytes);
spin_unlock_irqrestore(&m->lock, flags);
@@ -396,30 +400,31 @@ static void dispatch_queued_ios(struct multipath *m)
{
int r;
unsigned long flags;
- struct bio *bio = NULL, *next;
struct dm_mpath_io *mpio;
union map_info *info;
+ struct request *clone, *n;
+ LIST_HEAD(cl);
spin_lock_irqsave(&m->lock, flags);
- bio = bio_list_get(&m->queued_ios);
+ list_splice_init(&m->queued_ios, &cl);
spin_unlock_irqrestore(&m->lock, flags);
- while (bio) {
- next = bio->bi_next;
- bio->bi_next = NULL;
+ list_for_each_entry_safe(clone, n, &cl, queuelist) {
+ list_del_init(&clone->queuelist);
- info = dm_get_mapinfo(bio);
+ info = dm_get_rq_mapinfo(clone);
mpio = info->ptr;
- r = map_io(m, bio, mpio, 1);
- if (r < 0)
- bio_endio(bio, r);
- else if (r == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
- else if (r == DM_MAPIO_REQUEUE)
- bio_endio(bio, -EIO);
-
- bio = next;
+ r = map_io(m, clone, mpio, 1);
+ if (r < 0) {
+ mempool_free(mpio, m->mpio_pool);
+ dm_kill_unmapped_request(clone, r);
+ } else if (r == DM_MAPIO_REMAPPED)
+ dm_dispatch_request(clone);
+ else if (r == DM_MAPIO_REQUEUE) {
+ mempool_free(mpio, m->mpio_pool);
+ dm_requeue_unmapped_request(clone);
+ }
}
}
@@ -427,8 +432,8 @@ static void process_queued_ios(struct work_struct *work)
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
- struct pgpath *pgpath = NULL;
- unsigned init_required = 0, must_queue = 1;
+ struct pgpath *pgpath = NULL, *tmp;
+ unsigned must_queue = 1;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
@@ -437,7 +442,7 @@ static void process_queued_ios(struct work_struct *work)
goto out;
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
pgpath = m->current_pgpath;
@@ -446,19 +451,15 @@ static void process_queued_ios(struct work_struct *work)
must_queue = 0;
if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
- m->pgpath_to_activate = pgpath;
m->pg_init_count++;
m->pg_init_required = 0;
- m->pg_init_in_progress = 1;
- init_required = 1;
+ list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+ if (queue_work(kmpath_handlerd, &tmp->activate_path))
+ m->pg_init_in_progress++;
+ }
}
-
out:
spin_unlock_irqrestore(&m->lock, flags);
-
- if (init_required)
- queue_work(kmpath_handlerd, &m->activate_path);
-
if (!must_queue)
dispatch_queued_ios(m);
}
@@ -553,6 +554,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
return -EINVAL;
}
+ if (ps_argc > as->argc) {
+ dm_put_path_selector(pst);
+ ti->error = "not enough arguments for path selector";
+ return -EINVAL;
+ }
+
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
dm_put_path_selector(pst);
@@ -591,9 +598,20 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
}
if (m->hw_handler_name) {
- r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
- m->hw_handler_name);
+ struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
+
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ if (r == -EBUSY) {
+ /*
+ * Already attached to a different hw_handler;
+ * try to reattach with the correct one.
+ */
+ scsi_dh_detach(q);
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ }
+
if (r < 0) {
+ ti->error = "error attaching hardware handler";
dm_put_device(ti, p->path.dev);
goto bad;
}
@@ -699,6 +717,11 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
+ if (hw_argc > as->argc) {
+ ti->error = "not enough arguments for hardware handler";
+ return -EINVAL;
+ }
+
m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
@@ -823,6 +846,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
goto bad;
}
+ ti->num_flush_requests = 1;
+
return 0;
bad:
@@ -836,25 +861,29 @@ static void multipath_dtr(struct dm_target *ti)
flush_workqueue(kmpath_handlerd);
flush_workqueue(kmultipathd);
+ flush_scheduled_work();
free_multipath(m);
}
/*
- * Map bios, recording original fields for later in case we have to resubmit
+ * Map cloned requests
*/
-static int multipath_map(struct dm_target *ti, struct bio *bio,
+static int multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
int r;
struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private;
- mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
- dm_bio_record(&mpio->details, bio);
+ mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
+ if (!mpio)
+ /* ENOMEM, requeue */
+ return DM_MAPIO_REQUEUE;
+ memset(mpio, 0, sizeof(*mpio));
map_context->ptr = mpio;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
- r = map_io(m, bio, mpio, 0);
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ r = map_io(m, clone, mpio, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
mempool_free(mpio, m->mpio_pool);
@@ -924,9 +953,13 @@ static int reinstate_path(struct pgpath *pgpath)
pgpath->is_active = 1;
- m->current_pgpath = NULL;
- if (!m->nr_valid_paths++ && m->queue_size)
+ if (!m->nr_valid_paths++ && m->queue_size) {
+ m->current_pgpath = NULL;
queue_work(kmultipathd, &m->process_queued_ios);
+ } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ m->pg_init_in_progress++;
+ }
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
pgpath->path.dev->name, m->nr_valid_paths);
@@ -1102,87 +1135,70 @@ static void pg_init_done(struct dm_path *path, int errors)
spin_lock_irqsave(&m->lock, flags);
if (errors) {
- DMERR("Could not failover device. Error %d.", errors);
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (pgpath == m->current_pgpath) {
+ DMERR("Could not failover device. Error %d.", errors);
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
} else if (!m->pg_init_required) {
m->queue_io = 0;
pg->bypassed = 0;
}
- m->pg_init_in_progress = 0;
- queue_work(kmultipathd, &m->process_queued_ios);
+ m->pg_init_in_progress--;
+ if (!m->pg_init_in_progress)
+ queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
int ret;
- struct multipath *m =
- container_of(work, struct multipath, activate_path);
- struct dm_path *path;
- unsigned long flags;
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, activate_path);
- spin_lock_irqsave(&m->lock, flags);
- path = &m->pgpath_to_activate->path;
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
- if (!path)
- return;
- ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
- pg_init_done(path, ret);
+ ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+ pg_init_done(&pgpath->path, ret);
}
/*
* end_io handling
*/
-static int do_end_io(struct multipath *m, struct bio *bio,
+static int do_end_io(struct multipath *m, struct request *clone,
int error, struct dm_mpath_io *mpio)
{
+ /*
+ * We don't queue any clone request inside the multipath target
+ * during end I/O handling, since those clone requests don't have
+ * bio clones. If we queue them inside the multipath target,
+ * we need to make bio clones, which requires memory allocation.
+ * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
+ * don't have bio clones.)
+ * Instead of queueing the clone request here, we queue the original
+ * request into dm core, which will remake a clone request and
+ * clone bios for it and resubmit it later.
+ */
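+ /*
+ * The default return value is DM_ENDIO_REQUEUE: dm core requeues the
+ * original request and resubmits it later. It is overridden to -EIO
+ * below when no valid path is left, queue_if_no_path is off and
+ * push-back is not required.
+ */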
+ int r = DM_ENDIO_REQUEUE;
unsigned long flags;
- if (!error)
+ if (!error && !clone->errors)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
- return error;
-
if (error == -EOPNOTSUPP)
return error;
- spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths) {
- if (__must_push_back(m)) {
- spin_unlock_irqrestore(&m->lock, flags);
- return DM_ENDIO_REQUEUE;
- } else if (!m->queue_if_no_path) {
- spin_unlock_irqrestore(&m->lock, flags);
- return -EIO;
- } else {
- spin_unlock_irqrestore(&m->lock, flags);
- goto requeue;
- }
- }
- spin_unlock_irqrestore(&m->lock, flags);
-
if (mpio->pgpath)
fail_path(mpio->pgpath);
- requeue:
- dm_bio_restore(&mpio->details, bio);
-
- /* queue for the daemon to resubmit or fail */
spin_lock_irqsave(&m->lock, flags);
- bio_list_add(&m->queued_ios, bio);
- m->queue_size++;
- if (!m->queue_io)
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
+ r = -EIO;
spin_unlock_irqrestore(&m->lock, flags);
- return DM_ENDIO_INCOMPLETE; /* io not complete */
+ return r;
}
-static int multipath_end_io(struct dm_target *ti, struct bio *bio,
+static int multipath_end_io(struct dm_target *ti, struct request *clone,
int error, union map_info *map_context)
{
struct multipath *m = ti->private;
@@ -1191,14 +1207,13 @@ static int multipath_end_io(struct dm_target *ti, struct bio *bio,
struct path_selector *ps;
int r;
- r = do_end_io(m, bio, error, mpio);
+ r = do_end_io(m, clone, error, mpio);
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- if (r != DM_ENDIO_INCOMPLETE)
- mempool_free(mpio, m->mpio_pool);
+ mempool_free(mpio, m->mpio_pool);
return r;
}
@@ -1411,7 +1426,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
if (m->current_pgpath) {
bdev = m->current_pgpath->path.dev->bdev;
@@ -1428,22 +1443,113 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
+static int multipath_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *p;
+ int ret = 0;
+
+ list_for_each_entry(pg, &m->priority_groups, list) {
+ list_for_each_entry(p, &pg->pgpaths, list) {
+ ret = fn(ti, p->path.dev, ti->begin, data);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
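+
+/*
+ * The iterate_devices callout lets dm core visit every underlying
+ * device of this target; elsewhere in this patch (dm-table.c) it is
+ * used with dm_set_device_limits() and device_area_is_valid() to
+ * stack queue_limits and validate each device area.
+ */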
+
+static int __pgpath_busy(struct pgpath *pgpath)
+{
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
+
+ return dm_underlying_device_busy(q);
+}
+
+/*
+ * We return "busy", only when we can map I/Os but underlying devices
+ * are busy (so even if we map I/Os now, the I/Os will wait on
+ * the underlying queue).
+ * In other words, if we want to kill I/Os or queue them inside us
+ * due to map unavailability, we don't return "busy". Otherwise,
+ * dm core won't give us the I/Os and we can't do what we want.
+ */
+static int multipath_busy(struct dm_target *ti)
+{
+ int busy = 0, has_active = 0;
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *pgpath;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+
+ /* Guess which priority_group will be used at next mapping time */
+ if (unlikely(!m->current_pgpath && m->next_pg))
+ pg = m->next_pg;
+ else if (likely(m->current_pg))
+ pg = m->current_pg;
+ else
+ /*
+ * We don't know which pg will be used at next mapping time.
+ * We don't call __choose_pgpath() here to avoid triggering
+ * pg_init just by busy checking.
+ * So we don't know whether underlying devices we will be using
+ * at next mapping time are busy or not. Just try mapping.
+ */
+ goto out;
+
+ /*
+ * If there is at least one non-busy active path, the path selector
+ * will be able to select it. So we consider such a pg as not busy.
+ */
+ busy = 1;
+ list_for_each_entry(pgpath, &pg->pgpaths, list)
+ if (pgpath->is_active) {
+ has_active = 1;
+
+ if (!__pgpath_busy(pgpath)) {
+ busy = 0;
+ break;
+ }
+ }
+
+ if (!has_active)
+ /*
+ * No active path in this pg, so this pg won't be used and
+ * the current_pg will be changed at next mapping time.
+ * We need to try mapping to determine it.
+ */
+ busy = 0;
+
+out:
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return busy;
+}
+
/*-----------------------------------------------------------------
* Module setup
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 0, 5},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
- .map = multipath_map,
- .end_io = multipath_end_io,
+ .map_rq = multipath_map,
+ .rq_end_io = multipath_end_io,
.presuspend = multipath_presuspend,
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
.ioctl = multipath_ioctl,
+ .iterate_devices = multipath_iterate_devices,
+ .busy = multipath_busy,
};
static int __init dm_multipath_init(void)
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index 27357b85d73d..e7d1fa8b0459 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -56,7 +56,8 @@ struct path_selector_type {
* the path fails.
*/
struct dm_path *(*select_path) (struct path_selector *ps,
- unsigned *repeat_count);
+ unsigned *repeat_count,
+ size_t nr_bytes);
/*
* Notify the selector that a path has failed.
@@ -75,7 +76,10 @@ struct path_selector_type {
int (*status) (struct path_selector *ps, struct dm_path *path,
status_type_t type, char *result, unsigned int maxlen);
- int (*end_io) (struct path_selector *ps, struct dm_path *path);
+ int (*start_io) (struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
+ int (*end_io) (struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
};
/* Register a path selector */
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
new file mode 100644
index 000000000000..f92b6cea9d9c
--- /dev/null
+++ b/drivers/md/dm-queue-length.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
+ * Copyright (C) 2006-2009 NEC Corporation.
+ *
+ * dm-queue-length.c
+ *
+ * Module Author: Stefan Bader, IBM
+ * Modified by: Kiyoshi Ueda, NEC
+ *
+ * This file is released under the GPL.
+ *
+ * queue-length path selector - choose a path with the least number of
+ * in-flight I/Os.
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+#define DM_MSG_PREFIX "multipath queue-length"
+#define QL_MIN_IO 128
+#define QL_VERSION "0.1.0"
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned repeat_count;
+ atomic_t qlen; /* the number of in-flight I/Os */
+};
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ }
+
+ return s;
+}
+
+static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s = alloc_selector();
+
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+ return 0;
+}
+
+static void ql_free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void ql_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ ql_free_paths(&s->valid_paths);
+ ql_free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int ql_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct path_info *pi;
+
+ /* When called with NULL path, return selector status/args. */
+ if (!path)
+ DMEMIT("0 ");
+ else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d ", atomic_read(&pi->qlen));
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%u ", pi->repeat_count);
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int ql_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned repeat_count = QL_MIN_IO;
+
+ /*
+ * Arguments: [<repeat_count>]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (QL_MIN_IO) is used.
+ */
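+ /*
+ * Illustrative table line (device numbers and sizes are examples
+ * only, following the usual multipath table layout):
+ *   0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128
+ * i.e. two paths with one selector argument each, both using a
+ * repeat_count of 128.
+ */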
+ if (argc > 1) {
+ *error = "queue-length ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ *error = "queue-length ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ /* Allocate the path information structure */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "queue-length ps: Error allocating path information";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+ atomic_set(&pi->qlen, 0);
+
+ path->pscontext = pi;
+
+ list_add_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move(&pi->list, &s->failed_paths);
+}
+
+static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+/*
+ * Select a path having the minimum number of in-flight I/Os
+ */
+static struct dm_path *ql_select_path(struct path_selector *ps,
+ unsigned *repeat_count, size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+
+ if (list_empty(&s->valid_paths))
+ return NULL;
+
+ /* Change preferred (first in list) path to evenly balance. */
+ list_move_tail(s->valid_paths.next, &s->valid_paths);
+
+ list_for_each_entry(pi, &s->valid_paths, list) {
+ if (!best ||
+ (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
+ best = pi;
+
+ if (!atomic_read(&best->qlen))
+ break;
+ }
+
+ if (!best)
+ return NULL;
+
+ *repeat_count = best->repeat_count;
+
+ return best->path;
+}
+
+static int ql_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_inc(&pi->qlen);
+
+ return 0;
+}
+
+static int ql_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_dec(&pi->qlen);
+
+ return 0;
+}
+
+static struct path_selector_type ql_ps = {
+ .name = "queue-length",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 1,
+ .create = ql_create,
+ .destroy = ql_destroy,
+ .status = ql_status,
+ .add_path = ql_add_path,
+ .fail_path = ql_fail_path,
+ .reinstate_path = ql_reinstate_path,
+ .select_path = ql_select_path,
+ .start_io = ql_start_io,
+ .end_io = ql_end_io,
+};
+
+static int __init dm_ql_init(void)
+{
+ int r = dm_register_path_selector(&ql_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " QL_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_ql_exit(void)
+{
+ int r = dm_unregister_path_selector(&ql_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_ql_init);
+module_exit(dm_ql_exit);
+
+MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>");
+MODULE_DESCRIPTION(
+ "(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n"
+ DM_NAME " path selector to balance the number of in-flight I/Os"
+);
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 076fbb4e967a..ce8868c768cc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1283,9 +1283,23 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
return 0;
}
+static int mirror_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct mirror_set *ms = ti->private;
+ int ret = 0;
+ unsigned i;
+
+ for (i = 0; !ret && i < ms->nr_mirrors; i++)
+ ret = fn(ti, ms->mirror[i].dev,
+ ms->mirror[i].offset, data);
+
+ return ret;
+}
+
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 0, 20},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
@@ -1295,6 +1309,7 @@ static struct target_type mirror_target = {
.postsuspend = mirror_postsuspend,
.resume = mirror_resume,
.status = mirror_status,
+ .iterate_devices = mirror_iterate_devices,
};
static int __init dm_mirror_init(void)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 7b899be0b087..36dbe29f2fd6 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -283,7 +283,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
if (unlikely(!nreg))
- nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
+ nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);
nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
DM_RH_CLEAN : DM_RH_NOSYNC;
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index cdfbf65b28cb..24752f449bef 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -161,7 +161,7 @@ static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p)
}
static struct dm_path *rr_select_path(struct path_selector *ps,
- unsigned *repeat_count)
+ unsigned *repeat_count, size_t nr_bytes)
{
struct selector *s = (struct selector *) ps->context;
struct path_info *pi = NULL;
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
new file mode 100644
index 000000000000..cfa668f46c40
--- /dev/null
+++ b/drivers/md/dm-service-time.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved.
+ *
+ * Module Author: Kiyoshi Ueda
+ *
+ * This file is released under the GPL.
+ *
+ * Throughput oriented path selector.
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#define DM_MSG_PREFIX "multipath service-time"
+#define ST_MIN_IO 1
+#define ST_MAX_RELATIVE_THROUGHPUT 100
+#define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7
+#define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT)
+#define ST_VERSION "0.2.0"
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned repeat_count;
+ unsigned relative_throughput;
+ atomic_t in_flight_size; /* Total size of in-flight I/Os */
+};
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ }
+
+ return s;
+}
+
+static int st_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s = alloc_selector();
+
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+ return 0;
+}
+
+static void free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void st_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ free_paths(&s->valid_paths);
+ free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int st_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct path_info *pi;
+
+ if (!path)
+ DMEMIT("0 ");
+ else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d %u ", atomic_read(&pi->in_flight_size),
+ pi->relative_throughput);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%u %u ", pi->repeat_count,
+ pi->relative_throughput);
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int st_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned repeat_count = ST_MIN_IO;
+ unsigned relative_throughput = 1;
+
+ /*
+ * Arguments: [<repeat_count> [<relative_throughput>]]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (ST_MIN_IO) is used.
+ * <relative_throughput>: The relative throughput value of
+ * the path among all paths in the path-group.
+ * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
+ * If not given, minimum value '1' is used.
+ * If '0' is given, the path isn't selected while
+ * other paths having a positive value are
+ * available.
+ */
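+ /*
+ * Illustrative table line (example values only):
+ *   0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 1 8:16 128 4
+ * i.e. two paths with two selector arguments each: repeat_count 128
+ * for both, relative_throughput 1 and 4 respectively.
+ */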
+ if (argc > 2) {
+ *error = "service-time ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if (argc && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ *error = "service-time ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ if ((argc == 2) &&
+ (sscanf(argv[1], "%u", &relative_throughput) != 1 ||
+ relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) {
+ *error = "service-time ps: invalid relative_throughput value";
+ return -EINVAL;
+ }
+
+ /* allocate the path */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "service-time ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+ pi->relative_throughput = relative_throughput;
+ atomic_set(&pi->in_flight_size, 0);
+
+ path->pscontext = pi;
+
+ list_add_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+static void st_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move(&pi->list, &s->failed_paths);
+}
+
+static int st_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+/*
+ * Compare the estimated service time of 2 paths, pi1 and pi2,
+ * for the incoming I/O.
+ *
+ * Returns:
+ * < 0 : pi1 is better
+ * 0 : no difference between pi1 and pi2
+ * > 0 : pi2 is better
+ *
+ * Description:
+ * Basically, the service time is estimated by:
+ * ('pi->in-flight-size' + 'incoming') / 'pi->relative_throughput'
+ * To reduce the calculation, some optimizations are made.
+ * (See comments inline)
+ */
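+/*
+ * Hypothetical numbers: sz1 = 64, sz2 = 16, relative_throughput 100
+ * for pi1 and 20 for pi2, incoming = 8.  Then st1 = (64 + 8) * 20 =
+ * 1440 and st2 = (16 + 8) * 100 = 2400, so st1 < st2 and pi1 is
+ * chosen even though it has more bytes in flight, because it is five
+ * times faster.
+ */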
+static int st_compare_load(struct path_info *pi1, struct path_info *pi2,
+ size_t incoming)
+{
+ size_t sz1, sz2, st1, st2;
+
+ sz1 = atomic_read(&pi1->in_flight_size);
+ sz2 = atomic_read(&pi2->in_flight_size);
+
+ /*
+ * Case 1: Both have same throughput value. Choose less loaded path.
+ */
+ if (pi1->relative_throughput == pi2->relative_throughput)
+ return sz1 - sz2;
+
+ /*
+ * Case 2a: Both have same load. Choose higher throughput path.
+ * Case 2b: One path has no throughput value. Choose the other one.
+ */
+ if (sz1 == sz2 ||
+ !pi1->relative_throughput || !pi2->relative_throughput)
+ return pi2->relative_throughput - pi1->relative_throughput;
+
+ /*
+ * Case 3: Calculate service time. Choose faster path.
+ * Service time using pi1:
+ * st1 = (sz1 + incoming) / pi1->relative_throughput
+ * Service time using pi2:
+ * st2 = (sz2 + incoming) / pi2->relative_throughput
+ *
+ * To avoid the division, transform the expression to use
+ * multiplication.
+ * Because ->relative_throughput > 0 here, if st1 < st2,
+ * the expressions below have the same meaning:
+ * (sz1 + incoming) / pi1->relative_throughput <
+ * (sz2 + incoming) / pi2->relative_throughput
+ * (sz1 + incoming) * pi2->relative_throughput <
+ * (sz2 + incoming) * pi1->relative_throughput
+ * So use the latter one.
+ */
+ sz1 += incoming;
+ sz2 += incoming;
+ if (unlikely(sz1 >= ST_MAX_INFLIGHT_SIZE ||
+ sz2 >= ST_MAX_INFLIGHT_SIZE)) {
+ /*
+ * The size may be too big to multiply by pi->relative_throughput
+ * without overflowing.
+ * To avoid the overflow and mis-selection, shift down both.
+ */
+ sz1 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT;
+ sz2 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT;
+ }
+ st1 = sz1 * pi2->relative_throughput;
+ st2 = sz2 * pi1->relative_throughput;
+ if (st1 != st2)
+ return st1 - st2;
+
+ /*
+ * Case 4: Service time is equal. Choose higher throughput path.
+ */
+ return pi2->relative_throughput - pi1->relative_throughput;
+}
+
+static struct dm_path *st_select_path(struct path_selector *ps,
+ unsigned *repeat_count, size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+
+ if (list_empty(&s->valid_paths))
+ return NULL;
+
+ /* Change preferred (first in list) path to evenly balance. */
+ list_move_tail(s->valid_paths.next, &s->valid_paths);
+
+ list_for_each_entry(pi, &s->valid_paths, list)
+ if (!best || (st_compare_load(pi, best, nr_bytes) < 0))
+ best = pi;
+
+ if (!best)
+ return NULL;
+
+ *repeat_count = best->repeat_count;
+
+ return best->path;
+}
+
+static int st_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_add(nr_bytes, &pi->in_flight_size);
+
+ return 0;
+}
+
+static int st_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_sub(nr_bytes, &pi->in_flight_size);
+
+ return 0;
+}
+
+static struct path_selector_type st_ps = {
+ .name = "service-time",
+ .module = THIS_MODULE,
+ .table_args = 2,
+ .info_args = 2,
+ .create = st_create,
+ .destroy = st_destroy,
+ .status = st_status,
+ .add_path = st_add_path,
+ .fail_path = st_fail_path,
+ .reinstate_path = st_reinstate_path,
+ .select_path = st_select_path,
+ .start_io = st_start_io,
+ .end_io = st_end_io,
+};
+
+static int __init dm_st_init(void)
+{
+ int r = dm_register_path_selector(&st_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " ST_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_st_exit(void)
+{
+ int r = dm_unregister_path_selector(&st_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_st_init);
+module_exit(dm_st_exit);
+
+MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector");
+MODULE_AUTHOR("Kiyoshi Ueda <k-ueda@ct.jp.nec.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2662a41337e7..6e3fe4f14934 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -636,7 +636,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, WRITE))
+ if (ps->valid && area_io(ps, WRITE_BARRIER))
ps->valid = 0;
/*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d73f17fc7778..d573165cd2b7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -678,6 +678,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = s;
ti->split_io = s->store->chunk_size;
+ ti->num_flush_requests = 1;
return 0;
@@ -1030,6 +1031,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ if (unlikely(bio_empty_barrier(bio))) {
+ bio->bi_bdev = s->store->cow->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
+
chunk = sector_to_chunk(s->store, bio->bi_sector);
/* Full snapshots are not usable */
@@ -1338,6 +1344,8 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = dev;
+ ti->num_flush_requests = 1;
+
return 0;
}
@@ -1353,6 +1361,9 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
+ if (unlikely(bio_empty_barrier(bio)))
+ return DM_MAPIO_REMAPPED;
+
/* Only tell snapshots if this is a write */
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 41569bc60abc..b240e85ae39a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -167,6 +167,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
sc->stripes = stripes;
sc->stripe_width = width;
ti->split_io = chunk_size;
+ ti->num_flush_requests = stripes;
sc->chunk_mask = ((sector_t) chunk_size) - 1;
for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
@@ -211,10 +212,18 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
+ sector_t offset, chunk;
+ uint32_t stripe;
- sector_t offset = bio->bi_sector - ti->begin;
- sector_t chunk = offset >> sc->chunk_shift;
- uint32_t stripe = sector_div(chunk, sc->stripes);
+ if (unlikely(bio_empty_barrier(bio))) {
+ BUG_ON(map_context->flush_request >= sc->stripes);
+ bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
+
+ offset = bio->bi_sector - ti->begin;
+ chunk = offset >> sc->chunk_shift;
+ stripe = sector_div(chunk, sc->stripes);
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
bio->bi_sector = sc->stripe[stripe].physical_start +
@@ -304,15 +313,31 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
return error;
}
+static int stripe_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct stripe_c *sc = ti->private;
+ int ret = 0;
+ unsigned i = 0;
+
+ do
+ ret = fn(ti, sc->stripe[i].dev,
+ sc->stripe[i].physical_start, data);
+ while (!ret && ++i < sc->stripes);
+
+ return ret;
+}
+
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
.map = stripe_map,
.end_io = stripe_end_io,
.status = stripe_status,
+ .iterate_devices = stripe_iterate_devices,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index a2a45e6c7c8b..4b045903a4e2 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -57,12 +57,21 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
return strlen(buf);
}
+static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
+{
+ sprintf(buf, "%d\n", dm_suspended(md));
+
+ return strlen(buf);
+}
+
static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
+static DM_ATTR_RO(suspended);
static struct attribute *dm_attrs[] = {
&dm_attr_name.attr,
&dm_attr_uuid.attr,
+ &dm_attr_suspended.attr,
NULL,
};
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e9a73bb242b0..2cba557d9e61 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -41,6 +41,7 @@
struct dm_table {
struct mapped_device *md;
atomic_t holders;
+ unsigned type;
/* btree table */
unsigned int depth;
@@ -62,15 +63,11 @@ struct dm_table {
/* a list of devices used by this table */
struct list_head devices;
- /*
- * These are optimistic limits taken from all the
- * targets, some targets will need smaller limits.
- */
- struct io_restrictions limits;
-
/* events get handed up using this callback */
void (*event_fn)(void *);
void *event_context;
+
+ struct dm_md_mempools *mempools;
};
/*
@@ -89,43 +86,6 @@ static unsigned int int_log(unsigned int n, unsigned int base)
}
/*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
-/*
- * Combine two io_restrictions, always taking the lower value.
- */
-static void combine_restrictions_low(struct io_restrictions *lhs,
- struct io_restrictions *rhs)
-{
- lhs->max_sectors =
- min_not_zero(lhs->max_sectors, rhs->max_sectors);
-
- lhs->max_phys_segments =
- min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);
-
- lhs->max_hw_segments =
- min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
-
- lhs->logical_block_size = max(lhs->logical_block_size,
- rhs->logical_block_size);
-
- lhs->max_segment_size =
- min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
-
- lhs->max_hw_sectors =
- min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
-
- lhs->seg_boundary_mask =
- min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
-
- lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
-
- lhs->no_cluster |= rhs->no_cluster;
-}
-
-/*
* Calculate the index of the child node of the n'th node k'th key.
*/
static inline unsigned int get_child(unsigned int n, unsigned int k)
@@ -267,6 +227,8 @@ static void free_devices(struct list_head *devices)
list_for_each_safe(tmp, next, devices) {
struct dm_dev_internal *dd =
list_entry(tmp, struct dm_dev_internal, list);
+ DMWARN("dm_table_destroy: dm_put_device call missing for %s",
+ dd->dm_dev.name);
kfree(dd);
}
}
@@ -296,12 +258,10 @@ void dm_table_destroy(struct dm_table *t)
vfree(t->highs);
/* free the device list */
- if (t->devices.next != &t->devices) {
- DMWARN("devices still present during destroy: "
- "dm_table_remove_device calls missing");
-
+ if (t->devices.next != &t->devices)
free_devices(&t->devices);
- }
+
+ dm_free_md_mempools(t->mempools);
kfree(t);
}
@@ -385,15 +345,48 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
/*
* If possible, this checks an area of a destination device is valid.
*/
-static int check_device_area(struct dm_dev_internal *dd, sector_t start,
- sector_t len)
+static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, void *data)
{
- sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
+ sector_t dev_size =
+ i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ unsigned short logical_block_size_sectors =
+ limits->logical_block_size >> SECTOR_SHIFT;
+ char b[BDEVNAME_SIZE];
if (!dev_size)
return 1;
- return ((start < dev_size) && (len <= (dev_size - start)));
+ if ((start >= dev_size) || (start + ti->len > dev_size)) {
+ DMWARN("%s: %s too small for target",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
+ return 0;
+ }
+
+ if (logical_block_size_sectors <= 1)
+ return 1;
+
+ if (start & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: start=%llu not aligned to h/w "
+ "logical block size %hu of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 0;
+ }
+
+ if (ti->len & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: len=%llu not aligned to h/w "
+ "logical block size %hu of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)ti->len,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 0;
+ }
+
+ return 1;
}
/*
@@ -479,38 +472,32 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
}
atomic_inc(&dd->count);
- if (!check_device_area(dd, start, len)) {
- DMWARN("device %s too small for target", path);
- dm_put_device(ti, &dd->dm_dev);
- return -EINVAL;
- }
-
*result = &dd->dm_dev;
-
return 0;
}
-void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
+
+int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, void *data)
{
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
- struct io_restrictions *rs = &ti->limits;
char b[BDEVNAME_SIZE];
if (unlikely(!q)) {
DMWARN("%s: Cannot set limits for nonexistent device %s",
dm_device_name(ti->table->md), bdevname(bdev, b));
- return;
+ return 0;
}
- /*
- * Combine the device limits low.
- *
- * FIXME: if we move an io_restriction struct
- * into q this would just be a call to
- * combine_restrictions_low()
- */
- rs->max_sectors =
- min_not_zero(rs->max_sectors, queue_max_sectors(q));
+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
+ DMWARN("%s: target device %s is misaligned",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
/*
* Check if merge fn is supported.
@@ -519,48 +506,21 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
*/
if (q->merge_bvec_fn && !ti->type->merge)
- rs->max_sectors =
- min_not_zero(rs->max_sectors,
+ limits->max_sectors =
+ min_not_zero(limits->max_sectors,
(unsigned int) (PAGE_SIZE >> 9));
-
- rs->max_phys_segments =
- min_not_zero(rs->max_phys_segments,
- queue_max_phys_segments(q));
-
- rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
-
- rs->logical_block_size = max(rs->logical_block_size,
- queue_logical_block_size(q));
-
- rs->max_segment_size =
- min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
-
- rs->max_hw_sectors =
- min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
-
- rs->seg_boundary_mask =
- min_not_zero(rs->seg_boundary_mask,
- queue_segment_boundary(q));
-
- rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
-
- rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
sector_t len, fmode_t mode, struct dm_dev **result)
{
- int r = __table_get_device(ti->table, ti, path,
- start, len, mode, result);
-
- if (!r)
- dm_set_device_limits(ti, (*result)->bdev);
-
- return r;
+ return __table_get_device(ti->table, ti, path,
+ start, len, mode, result);
}
+
/*
* Decrement a devices use count and remove it if necessary.
*/
@@ -675,24 +635,78 @@ int dm_split_args(int *argc, char ***argvp, char *input)
return 0;
}
-static void check_for_valid_limits(struct io_restrictions *rs)
+/*
+ * Impose necessary and sufficient conditions on a device's table such
+ * that any incoming bio which respects its logical_block_size can be
+ * processed successfully. If it falls across the boundary between
+ * two or more targets, the size of each piece it gets split into must
+ * be compatible with the logical_block_size of the target processing it.
+ */
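+/*
+ * Hypothetical example: with a 4KiB logical block size (8 sectors),
+ * a first target 24 sectors long ends on a block boundary, remaining
+ * stays 0 and any following target is acceptable.  A first target
+ * 20 sectors long leaves remaining = 4; a following target whose
+ * devices also use 8-sector blocks would have to handle that
+ * 4-sector fragment, so the table is rejected.
+ */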
+static int validate_hardware_logical_block_alignment(struct dm_table *table,
+ struct queue_limits *limits)
{
- if (!rs->max_sectors)
- rs->max_sectors = SAFE_MAX_SECTORS;
- if (!rs->max_hw_sectors)
- rs->max_hw_sectors = SAFE_MAX_SECTORS;
- if (!rs->max_phys_segments)
- rs->max_phys_segments = MAX_PHYS_SEGMENTS;
- if (!rs->max_hw_segments)
- rs->max_hw_segments = MAX_HW_SEGMENTS;
- if (!rs->logical_block_size)
- rs->logical_block_size = 1 << SECTOR_SHIFT;
- if (!rs->max_segment_size)
- rs->max_segment_size = MAX_SEGMENT_SIZE;
- if (!rs->seg_boundary_mask)
- rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
- if (!rs->bounce_pfn)
- rs->bounce_pfn = -1;
+ /*
+ * This function uses arithmetic modulo the logical_block_size
+ * (in units of 512-byte sectors).
+ */
+ unsigned short device_logical_block_size_sects =
+ limits->logical_block_size >> SECTOR_SHIFT;
+
+ /*
+ * Offset of the start of the next table entry, mod logical_block_size.
+ */
+ unsigned short next_target_start = 0;
+
+ /*
+ * Given an aligned bio that extends beyond the end of a
+ * target, how many sectors must the next target handle?
+ */
+ unsigned short remaining = 0;
+
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ /*
+ * Check each entry in the table in turn.
+ */
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ blk_set_default_limits(&ti_limits);
+
+ /* combine all target devices' limits */
+ if (ti->type->iterate_devices)
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /*
+ * If the remaining sectors fall entirely within this
+ * table entry, are they compatible with its logical_block_size?
+ */
+ if (remaining < ti->len &&
+ remaining & ((ti_limits.logical_block_size >>
+ SECTOR_SHIFT) - 1))
+ break; /* Error */
+
+ next_target_start =
+ (unsigned short) ((next_target_start + ti->len) &
+ (device_logical_block_size_sects - 1));
+ remaining = next_target_start ?
+ device_logical_block_size_sects - next_target_start : 0;
+ }
+
+ if (remaining) {
+ DMWARN("%s: table line %u (start sect %llu len %llu) "
+ "not aligned to h/w logical block size %hu",
+ dm_device_name(table->md), i,
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len,
+ limits->logical_block_size);
+ return -EINVAL;
+ }
+
+ return 0;
}
int dm_table_add_target(struct dm_table *t, const char *type,
@@ -747,9 +761,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- /* FIXME: the plan is to combine high here and then have
- * the merge fn apply the target level restrictions. */
- combine_restrictions_low(&t->limits, &tgt->limits);
return 0;
bad:
@@ -758,6 +769,104 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return r;
}
+int dm_table_set_type(struct dm_table *t)
+{
+ unsigned i;
+ unsigned bio_based = 0, request_based = 0;
+ struct dm_target *tgt;
+ struct dm_dev_internal *dd;
+ struct list_head *devices;
+
+ for (i = 0; i < t->num_targets; i++) {
+ tgt = t->targets + i;
+ if (dm_target_request_based(tgt))
+ request_based = 1;
+ else
+ bio_based = 1;
+
+ if (bio_based && request_based) {
+ DMWARN("Inconsistent table: different target types"
+ " can't be mixed up");
+ return -EINVAL;
+ }
+ }
+
+ if (bio_based) {
+ /* We must use this table as bio-based */
+ t->type = DM_TYPE_BIO_BASED;
+ return 0;
+ }
+
+ BUG_ON(!request_based); /* No targets in this table */
+
+ /* Non-request-stackable devices can't be used for request-based dm */
+ devices = dm_table_get_devices(t);
+ list_for_each_entry(dd, devices, list) {
+ if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
+ DMWARN("table load rejected: including"
+ " non-request-stackable devices");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Request-based dm supports only tables that have a single target now.
+ * To support multiple targets, request splitting support is needed,
+ * and that needs lots of changes in the block-layer.
+ * (e.g. request completion process for partial completion.)
+ */
+ if (t->num_targets > 1) {
+ DMWARN("Request-based dm doesn't support multiple targets yet");
+ return -EINVAL;
+ }
+
+ t->type = DM_TYPE_REQUEST_BASED;
+
+ return 0;
+}
+
+unsigned dm_table_get_type(struct dm_table *t)
+{
+ return t->type;
+}
+
+bool dm_table_bio_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
+}
+
+bool dm_table_request_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
+}
+
+int dm_table_alloc_md_mempools(struct dm_table *t)
+{
+ unsigned type = dm_table_get_type(t);
+
+ if (unlikely(type == DM_TYPE_NONE)) {
+ DMWARN("no table type is set, can't allocate mempools");
+ return -EINVAL;
+ }
+
+ t->mempools = dm_alloc_md_mempools(type);
+ if (!t->mempools)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dm_table_free_md_mempools(struct dm_table *t)
+{
+ dm_free_md_mempools(t->mempools);
+ t->mempools = NULL;
+}
+
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
+{
+ return t->mempools;
+}
+
static int setup_indexes(struct dm_table *t)
{
int i;
@@ -792,8 +901,6 @@ int dm_table_complete(struct dm_table *t)
int r = 0;
unsigned int leaf_nodes;
- check_for_valid_limits(&t->limits);
-
/* how many indexes will the btree have ? */
leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
@@ -869,6 +976,57 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
}
/*
+ * Establish the new table's queue_limits and validate them.
+ */
+int dm_calculate_queue_limits(struct dm_table *table,
+ struct queue_limits *limits)
+{
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ blk_set_default_limits(limits);
+
+ while (i < dm_table_get_num_targets(table)) {
+ blk_set_default_limits(&ti_limits);
+
+ ti = dm_table_get_target(table, i++);
+
+ if (!ti->type->iterate_devices)
+ goto combine_limits;
+
+ /*
+ * Combine queue limits of all the devices this target uses.
+ */
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /*
+ * Check each device area is consistent with the target's
+ * overall queue limits.
+ */
+ if (!ti->type->iterate_devices(ti, device_area_is_valid,
+ &ti_limits))
+ return -EINVAL;
+
+combine_limits:
+ /*
+ * Merge this target's queue limits into the overall limits
+ * for the table.
+ */
+ if (blk_stack_limits(limits, &ti_limits, 0) < 0)
+ DMWARN("%s: target device "
+ "(start sect %llu len %llu) "
+ "is misaligned",
+ dm_device_name(table->md),
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len);
+ }
+
+ return validate_hardware_logical_block_alignment(table, limits);
+}
+
+/*
* Set the integrity profile for this device if all devices used have
* matching profiles.
*/
@@ -907,27 +1065,42 @@ no_integrity:
return;
}
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
{
/*
- * Make sure we obey the optimistic sub devices
- * restrictions.
+ * Each target device in the table has a data area that should normally
+ * be aligned such that the DM device's alignment_offset is 0.
+ * FIXME: Propagate alignment_offsets up the stack and warn of
+ * sub-optimal or inconsistent settings.
+ */
+ limits->alignment_offset = 0;
+ limits->misaligned = 0;
+
+ /*
+ * Copy table's limits to the DM device's request_queue
*/
- blk_queue_max_sectors(q, t->limits.max_sectors);
- blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
- blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
- blk_queue_logical_block_size(q, t->limits.logical_block_size);
- blk_queue_max_segment_size(q, t->limits.max_segment_size);
- blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
- blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
- blk_queue_bounce_limit(q, t->limits.bounce_pfn);
-
- if (t->limits.no_cluster)
+ q->limits = *limits;
+
+ if (limits->no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
else
queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
dm_table_set_integrity(t);
+
+ /*
+ * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+ * visible to other CPUs because, once the flag is set, incoming bios
+ * are processed by request-based dm, which refers to the queue
+ * settings.
+ * Until the flag is set, bios are passed to bio-based dm and queued to
+ * md->deferred where queue settings are not needed yet.
+ * Those bios are passed to request-based dm at the resume time.
+ */
+ smp_mb();
+ if (dm_table_request_based(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -1023,6 +1196,20 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
return r;
}
+int dm_table_any_busy_target(struct dm_table *t)
+{
+ unsigned i;
+ struct dm_target *ti;
+
+ for (i = 0; i < t->num_targets; i++) {
+ ti = t->targets + i;
+ if (ti->type->busy && ti->type->busy(ti))
+ return 1;
+ }
+
+ return 0;
+}
+
void dm_table_unplug_all(struct dm_table *t)
{
struct dm_dev_internal *dd;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 48db308fae67..9acd54a5cffb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,13 @@
#define DM_MSG_PREFIX "core"
+/*
+ * Cookies are numeric values sent with CHANGE and REMOVE
+ * uevents while resuming, removing or renaming the device.
+ */
+#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
+#define DM_COOKIE_LENGTH 24
+
static const char *_name = DM_NAME;
static unsigned int major = 0;
@@ -71,7 +78,7 @@ struct dm_rq_target_io {
*/
struct dm_rq_clone_bio_info {
struct bio *orig;
- struct request *rq;
+ struct dm_rq_target_io *tio;
};
union map_info *dm_get_mapinfo(struct bio *bio)
@@ -81,6 +88,14 @@ union map_info *dm_get_mapinfo(struct bio *bio)
return NULL;
}
+union map_info *dm_get_rq_mapinfo(struct request *rq)
+{
+ if (rq && rq->end_io_data)
+ return &((struct dm_rq_target_io *)rq->end_io_data)->info;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
+
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -157,13 +172,31 @@ struct mapped_device {
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
- struct block_device *suspended_bdev;
+ struct block_device *bdev;
/* forced geometry settings */
struct hd_geometry geometry;
+ /* marker of flush suspend for request-based dm */
+ struct request suspend_rq;
+
+ /* For saving the address of __make_request for request based dm */
+ make_request_fn *saved_make_request_fn;
+
/* sysfs handle */
struct kobject kobj;
+
+ /* zero-length barrier that will be cloned and submitted to targets */
+ struct bio barrier_bio;
+};
+
+/*
+ * For mempools pre-allocation at the table loading time.
+ */
+struct dm_md_mempools {
+ mempool_t *io_pool;
+ mempool_t *tio_pool;
+ struct bio_set *bs;
};
#define MIN_IOS 256
@@ -391,14 +424,29 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
mempool_free(io, md->io_pool);
}
-static struct dm_target_io *alloc_tio(struct mapped_device *md)
+static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
- return mempool_alloc(md->tio_pool, GFP_NOIO);
+ mempool_free(tio, md->tio_pool);
}
-static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
+static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
{
- mempool_free(tio, md->tio_pool);
+ return mempool_alloc(md->tio_pool, GFP_ATOMIC);
+}
+
+static void free_rq_tio(struct dm_rq_target_io *tio)
+{
+ mempool_free(tio, tio->md->tio_pool);
+}
+
+static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
+{
+ return mempool_alloc(md->io_pool, GFP_ATOMIC);
+}
+
+static void free_bio_info(struct dm_rq_clone_bio_info *info)
+{
+ mempool_free(info, info->tio->md->io_pool);
}
static void start_io_acct(struct dm_io *io)
@@ -464,12 +512,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
struct dm_table *dm_get_table(struct mapped_device *md)
{
struct dm_table *t;
+ unsigned long flags;
- read_lock(&md->map_lock);
+ read_lock_irqsave(&md->map_lock, flags);
t = md->map;
if (t)
dm_table_get(t);
- read_unlock(&md->map_lock);
+ read_unlock_irqrestore(&md->map_lock, flags);
return t;
}
@@ -536,9 +585,11 @@ static void dec_pending(struct dm_io *io, int error)
* Target requested pushing back the I/O.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
- if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
- else
+ if (__noflush_suspending(md)) {
+ if (!bio_barrier(io->bio))
+ bio_list_add_head(&md->deferred,
+ io->bio);
+ } else
/* noflush suspend was interrupted. */
io->error = -EIO;
spin_unlock_irqrestore(&md->deferred_lock, flags);
@@ -553,7 +604,8 @@ static void dec_pending(struct dm_io *io, int error)
* a per-device variable for error reporting.
* Note that you can't touch the bio after end_io_acct
*/
- md->barrier_error = io_error;
+ if (!md->barrier_error && io_error != -EOPNOTSUPP)
+ md->barrier_error = io_error;
end_io_acct(io);
} else {
end_io_acct(io);
@@ -607,6 +659,262 @@ static void clone_endio(struct bio *bio, int error)
dec_pending(io, error);
}
+/*
+ * Partial completion handling for request-based dm
+ */
+static void end_clone_bio(struct bio *clone, int error)
+{
+ struct dm_rq_clone_bio_info *info = clone->bi_private;
+ struct dm_rq_target_io *tio = info->tio;
+ struct bio *bio = info->orig;
+ unsigned int nr_bytes = info->orig->bi_size;
+
+ bio_put(clone);
+
+ if (tio->error)
+ /*
+ * An error has already been detected on the request.
+ * Once an error has occurred, just let clone->end_io() handle
+ * the remainder.
+ */
+ return;
+ else if (error) {
+ /*
+ * Don't notify the upper layer of the error yet.
+ * The error handling decision is made by the target driver,
+ * when the request is completed.
+ */
+ tio->error = error;
+ return;
+ }
+
+ /*
+ * I/O for the bio successfully completed.
+ * Notify the upper layer of the data completion.
+ */
+
+ /*
+ * bios are processed from the head of the list.
+ * So the completing bio should always be rq->bio.
+ * If it's not, something wrong is happening.
+ */
+ if (tio->orig->bio != bio)
+ DMERR("bio completion is going in the middle of the request");
+
+ /*
+ * Update the original request.
+ * Do not use blk_end_request() here, because it may complete
+ * the original request before the clone, and break the ordering.
+ */
+ blk_update_request(tio->orig, 0, nr_bytes);
+}
+
+/*
+ * Don't touch any member of the md after calling this function because
+ * the md may be freed in dm_put() at the end of this function.
+ * Or do dm_get() before calling this function and dm_put() later.
+ */
+static void rq_completed(struct mapped_device *md, int run_queue)
+{
+ int wakeup_waiters = 0;
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!queue_in_flight(q))
+ wakeup_waiters = 1;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ /* nudge anyone waiting on suspend queue */
+ if (wakeup_waiters)
+ wake_up(&md->wait);
+
+ if (run_queue)
+ blk_run_queue(q);
+
+ /*
+ * dm_put() must be at the end of this function. See the comment above
+ */
+ dm_put(md);
+}
+
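+/*
+ * Undo what the prep_fn did for the original request: release the
+ * clone's bios via blk_rq_unprep_clone(), free the per-request tio
+ * and clear REQ_DONTPREP so the request is prepared again when it is
+ * requeued.
+ */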
+static void dm_unprep_request(struct request *rq)
+{
+ struct request *clone = rq->special;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ rq->special = NULL;
+ rq->cmd_flags &= ~REQ_DONTPREP;
+
+ blk_rq_unprep_clone(clone);
+ free_rq_tio(tio);
+}
+
+/*
+ * Requeue the original request of a clone.
+ */
+void dm_requeue_unmapped_request(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ dm_unprep_request(rq);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (elv_queue_empty(q))
+ blk_plug_device(q);
+ blk_requeue_request(q, rq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ rq_completed(md, 0);
+}
+EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
+
+static void __stop_queue(struct request_queue *q)
+{
+ blk_stop_queue(q);
+}
+
+static void stop_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void __start_queue(struct request_queue *q)
+{
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+}
+
+static void start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Complete the clone and the original request.
+ * Must be called without queue lock.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+
+ if (blk_pc_request(rq)) {
+ rq->errors = clone->errors;
+ rq->resid_len = clone->resid_len;
+
+ if (rq->sense)
+ /*
+ * We are using the sense buffer of the original
+ * request.
+ * So setting the length of the sense data is enough.
+ */
+ rq->sense_len = clone->sense_len;
+ }
+
+ BUG_ON(clone->bio);
+ free_rq_tio(tio);
+
+ blk_end_request_all(rq, error);
+
+ rq_completed(md, 1);
+}
+
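+/*
+ * Completion flow for a mapped clone (a sketch; the softirq handler
+ * is registered during queue setup, not shown here):
+ * end_clone_request() -> dm_complete_request(), which stores the
+ * clone in rq->completion_data and raises the block softirq;
+ * dm_softirq_done() then calls the target's rq_end_io() and finishes
+ * with dm_end_request() or dm_requeue_unmapped_request().
+ */
+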
+/*
+ * Request completion handler for request-based dm
+ */
+static void dm_softirq_done(struct request *rq)
+{
+ struct request *clone = rq->completion_data;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ int error = tio->error;
+
+ if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
+ error = rq_end_io(tio->ti, clone, error, &tio->info);
+
+ if (error <= 0)
+ /* The target wants to complete the I/O */
+ dm_end_request(clone, error);
+ else if (error == DM_ENDIO_INCOMPLETE)
+ /* The target will handle the I/O */
+ return;
+ else if (error == DM_ENDIO_REQUEUE)
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ else {
+ DMWARN("unimplemented target endio return value: %d", error);
+ BUG();
+ }
+}
+
+/*
+ * Complete the clone and the original request with the error status
+ * through softirq context.
+ */
+static void dm_complete_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct request *rq = tio->orig;
+
+ tio->error = error;
+ rq->completion_data = clone;
+ blk_complete_request(rq);
+}
+
+/*
+ * Complete the not-mapped clone and the original request with the error status
+ * through softirq context.
+ * Target's rq_end_io() function isn't called.
+ * This may be used when the target's map_rq() function fails.
+ */
+void dm_kill_unmapped_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct request *rq = tio->orig;
+
+ rq->cmd_flags |= REQ_FAILED;
+ dm_complete_request(clone, error);
+}
+EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
+
+/*
+ * Called with the queue lock held
+ */
+static void end_clone_request(struct request *clone, int error)
+{
+ /*
+ * This is just for cleaning up the information of the queue in which
+ * the clone was dispatched.
+ * The clone is *NOT* actually freed here because it is allocated from
+ * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
+ */
+ __blk_put_request(clone->q, clone);
+
+ /*
+ * Actual request completion is done in a softirq context which doesn't
+ * hold the queue lock. Otherwise, deadlock could occur because:
+ * - another request may be submitted by the upper level driver
+ * of the stacking during the completion
+ * - the submission which requires queue lock may be done
+ * against this queue
+ */
+ dm_complete_request(clone, error);
+}
+
static sector_t max_io_len(struct mapped_device *md,
sector_t sector, struct dm_target *ti)
{
@@ -634,11 +942,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
sector_t sector;
struct mapped_device *md;
- /*
- * Sanity checks.
- */
- BUG_ON(!clone->bi_size);
-
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
@@ -714,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_flags |= 1 << BIO_CLONED;
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, offset), len);
}
@@ -742,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone->bi_flags &= ~(1 << BIO_SEG_VALID);
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
bio_integrity_trim(clone,
@@ -752,6 +1055,48 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
return clone;
}
+static struct dm_target_io *alloc_tio(struct clone_info *ci,
+ struct dm_target *ti)
+{
+ struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
+
+ tio->io = ci->io;
+ tio->ti = ti;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ return tio;
+}
+
+static void __flush_target(struct clone_info *ci, struct dm_target *ti,
+ unsigned flush_nr)
+{
+ struct dm_target_io *tio = alloc_tio(ci, ti);
+ struct bio *clone;
+
+ tio->info.flush_request = flush_nr;
+
+ clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+ __bio_clone(clone, ci->bio);
+ clone->bi_destructor = dm_bio_destructor;
+
+ __map_bio(ti, clone, tio);
+}
+
+static int __clone_and_map_empty_barrier(struct clone_info *ci)
+{
+ unsigned target_nr = 0, flush_nr;
+ struct dm_target *ti;
+
+ while ((ti = dm_table_get_target(ci->map, target_nr++)))
+ for (flush_nr = 0; flush_nr < ti->num_flush_requests;
+ flush_nr++)
+ __flush_target(ci, ti, flush_nr);
+
+ ci->sector_count = 0;
+
+ return 0;
+}
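How many empty-barrier clones a target receives from the loop above is controlled by its num_flush_requests field. A minimal sketch of a constructor for a hypothetical bio-based target that opts in to a single flush clone (only ti->num_flush_requests comes from this patch series; the target itself is illustrative):

	/* Hypothetical constructor: ask for one clone per empty barrier. */
	static int example_flush_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		ti->num_flush_requests = 1;	/* __clone_and_map_empty_barrier() sends one clone */

		return 0;
	}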
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
@@ -759,6 +1104,9 @@ static int __clone_and_map(struct clone_info *ci)
sector_t len = 0, max;
struct dm_target_io *tio;
+ if (unlikely(bio_empty_barrier(bio)))
+ return __clone_and_map_empty_barrier(ci);
+
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
@@ -768,10 +1116,7 @@ static int __clone_and_map(struct clone_info *ci)
/*
* Allocate a target io object.
*/
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
+ tio = alloc_tio(ci, ti);
if (ci->sector_count <= max) {
/*
@@ -827,10 +1172,7 @@ static int __clone_and_map(struct clone_info *ci)
max = max_io_len(ci->md, ci->sector, ti);
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
+ tio = alloc_tio(ci, ti);
}
len = min(remaining, max);
@@ -865,7 +1207,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
if (!bio_barrier(bio))
bio_io_error(bio);
else
- md->barrier_error = -EIO;
+ if (!md->barrier_error)
+ md->barrier_error = -EIO;
return;
}
@@ -878,6 +1221,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.io->md = md;
ci.sector = bio->bi_sector;
ci.sector_count = bio_sectors(bio);
+ if (unlikely(bio_empty_barrier(bio)))
+ ci.sector_count = 1;
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
@@ -925,6 +1270,16 @@ static int dm_merge_bvec(struct request_queue *q,
*/
if (max_size && ti->type->merge)
max_size = ti->type->merge(ti, bvm, biovec, max_size);
+ /*
+	 * If the target doesn't support the merge method and some of the
+	 * underlying devices provide a merge_bvec method (we know this by
+	 * looking at queue_max_hw_sectors), then we can't allow bios with
+	 * multiple vector entries.  So always set max_size to 0, and the
+	 * code below allows just one page.
+	 */
+	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
+		max_size = 0;
out_table:
dm_table_put(map);
@@ -943,7 +1298,7 @@ out:
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int dm_request(struct request_queue *q, struct bio *bio)
+static int _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
@@ -980,12 +1335,274 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return 0;
}
+static int dm_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct mapped_device *md = q->queuedata;
+
+ if (unlikely(bio_barrier(bio))) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
+
+ return md->saved_make_request_fn(q, bio); /* call __make_request() */
+}
+
+static int dm_request_based(struct mapped_device *md)
+{
+ return blk_queue_stackable(md->queue);
+}
+
+static int dm_request(struct request_queue *q, struct bio *bio)
+{
+ struct mapped_device *md = q->queuedata;
+
+ if (dm_request_based(md))
+ return dm_make_request(q, bio);
+
+ return _dm_request(q, bio);
+}
+
+void dm_dispatch_request(struct request *rq)
+{
+ int r;
+
+ if (blk_queue_io_stat(rq->q))
+ rq->cmd_flags |= REQ_IO_STAT;
+
+ rq->start_time = jiffies;
+ r = blk_insert_cloned_request(rq->q, rq);
+ if (r)
+ dm_complete_request(rq, r);
+}
+EXPORT_SYMBOL_GPL(dm_dispatch_request);
+
+static void dm_rq_bio_destructor(struct bio *bio)
+{
+ struct dm_rq_clone_bio_info *info = bio->bi_private;
+ struct mapped_device *md = info->tio->md;
+
+ free_bio_info(info);
+ bio_free(bio, md->bs);
+}
+
+static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+ void *data)
+{
+ struct dm_rq_target_io *tio = data;
+ struct mapped_device *md = tio->md;
+ struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
+
+ if (!info)
+ return -ENOMEM;
+
+ info->orig = bio_orig;
+ info->tio = tio;
+ bio->bi_end_io = end_clone_bio;
+ bio->bi_private = info;
+ bio->bi_destructor = dm_rq_bio_destructor;
+
+ return 0;
+}
+
+static int setup_clone(struct request *clone, struct request *rq,
+ struct dm_rq_target_io *tio)
+{
+ int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+ dm_rq_bio_constructor, tio);
+
+ if (r)
+ return r;
+
+ clone->cmd = rq->cmd;
+ clone->cmd_len = rq->cmd_len;
+ clone->sense = rq->sense;
+ clone->buffer = rq->buffer;
+ clone->end_io = end_clone_request;
+ clone->end_io_data = tio;
+
+ return 0;
+}
+
+static int dm_rq_flush_suspending(struct mapped_device *md)
+{
+ return !md->suspend_rq.special;
+}
+
+/*
+ * Called with the queue lock held.
+ */
+static int dm_prep_fn(struct request_queue *q, struct request *rq)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_rq_target_io *tio;
+ struct request *clone;
+
+ if (unlikely(rq == &md->suspend_rq)) {
+ if (dm_rq_flush_suspending(md))
+ return BLKPREP_OK;
+ else
+ /* The flush suspend was interrupted */
+ return BLKPREP_KILL;
+ }
+
+ if (unlikely(rq->special)) {
+ DMWARN("Already has something in rq->special.");
+ return BLKPREP_KILL;
+ }
+
+ tio = alloc_rq_tio(md); /* Only one for each original request */
+ if (!tio)
+ /* -ENOMEM */
+ return BLKPREP_DEFER;
+
+ tio->md = md;
+ tio->ti = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ clone = &tio->clone;
+ if (setup_clone(clone, rq, tio)) {
+ /* -ENOMEM */
+ free_rq_tio(tio);
+ return BLKPREP_DEFER;
+ }
+
+ rq->special = clone;
+ rq->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+static void map_request(struct dm_target *ti, struct request *rq,
+ struct mapped_device *md)
+{
+ int r;
+ struct request *clone = rq->special;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+	 * We can't rely on the reference count held by the device opener,
+	 * because the device may be closed during request completion
+	 * once all bios have completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+
+ tio->ti = ti;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ switch (r) {
+ case DM_MAPIO_SUBMITTED:
+ /* The target has taken the I/O to submit by itself later */
+ break;
+ case DM_MAPIO_REMAPPED:
+ /* The target has remapped the I/O so dispatch it */
+ dm_dispatch_request(clone);
+ break;
+ case DM_MAPIO_REQUEUE:
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ break;
+ default:
+ if (r > 0) {
+ DMWARN("unimplemented target map return value: %d", r);
+ BUG();
+ }
+
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(clone, r);
+ break;
+ }
+}
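map_request() above dispatches on the value returned by the target's map_rq hook, and the softirq completion path later consults rq_end_io. A minimal sketch of both callbacks for a hypothetical pass-through request-based target (struct example_rq_ctx and its dev member are assumptions; the DM_MAPIO_*/DM_ENDIO_* codes and the callback signatures are the ones handled above):

	struct example_rq_ctx {
		struct dm_dev *dev;	/* single underlying device (assumed context) */
	};

	/* Remap every clone to the underlying device's queue. */
	static int example_map_rq(struct dm_target *ti, struct request *clone,
				  union map_info *map_context)
	{
		struct example_rq_ctx *c = ti->private;

		clone->q = bdev_get_queue(c->dev->bdev);
		clone->rq_disk = c->dev->bdev->bd_disk;

		return DM_MAPIO_REMAPPED;	/* map_request() calls dm_dispatch_request() */
	}

	/* Decide how the completed clone is handled. */
	static int example_rq_end_io(struct dm_target *ti, struct request *clone,
				     int error, union map_info *map_context)
	{
		if (error == -EBUSY)
			return DM_ENDIO_REQUEUE;	/* requeued via dm_requeue_unmapped_request() */

		return error;	/* <= 0 completes the original request with this status */
	}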
+
+/*
+ * q->request_fn for request-based dm.
+ * Called with the queue lock held.
+ */
+static void dm_request_fn(struct request_queue *q)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+ struct dm_target *ti;
+ struct request *rq;
+
+ /*
+ * For noflush suspend, check blk_queue_stopped() to immediately
+ * quit I/O dispatching.
+ */
+ while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+ rq = blk_peek_request(q);
+ if (!rq)
+ goto plug_and_out;
+
+		if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
+			if (queue_in_flight(q))
+				/* Not quiescent yet. Wait a bit longer */
+ goto plug_and_out;
+
+ /* This device should be quiet now */
+ __stop_queue(q);
+ blk_start_request(rq);
+ __blk_end_request_all(rq, 0);
+ wake_up(&md->wait);
+ goto out;
+ }
+
+ ti = dm_table_find_target(map, blk_rq_pos(rq));
+ if (ti->type->busy && ti->type->busy(ti))
+ goto plug_and_out;
+
+ blk_start_request(rq);
+ spin_unlock(q->queue_lock);
+ map_request(ti, rq, md);
+ spin_lock_irq(q->queue_lock);
+ }
+
+ goto out;
+
+plug_and_out:
+ if (!elv_queue_empty(q))
+ /* Some requests still remain, retry later */
+ blk_plug_device(q);
+
+out:
+ dm_table_put(map);
+
+ return;
+}
+
+int dm_underlying_device_busy(struct request_queue *q)
+{
+ return blk_lld_busy(q);
+}
+EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
+
+static int dm_lld_busy(struct request_queue *q)
+{
+ int r;
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+
+ if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
+ r = 1;
+ else
+ r = dm_table_any_busy_target(map);
+
+ dm_table_put(map);
+
+ return r;
+}
+
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_table(md);
if (map) {
+ if (dm_request_based(md))
+ generic_unplug_device(q);
+
dm_table_unplug_all(map);
dm_table_put(map);
}
@@ -1000,7 +1617,16 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
map = dm_get_table(md);
if (map) {
- r = dm_table_any_congested(map, bdi_bits);
+ /*
+			 * Request-based dm cares only about its own queue
+			 * when queried for the congestion status of the
+			 * request_queue.
+ */
+ if (dm_request_based(md))
+ r = md->queue->backing_dev_info.state &
+ bdi_bits;
+ else
+ r = dm_table_any_congested(map, bdi_bits);
+
dm_table_put(map);
}
}
@@ -1123,30 +1749,32 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_alloc_queue(GFP_KERNEL);
+ md->queue = blk_init_queue(dm_request_fn, NULL);
if (!md->queue)
goto bad_queue;
+ /*
+ * Request-based dm devices cannot be stacked on top of bio-based dm
+ * devices. The type of this dm device has not been decided yet,
+ * although we initialized the queue using blk_init_queue().
+ * The type is decided at the first table loading time.
+ * To prevent problematic device stacking, clear the queue flag
+ * for request stacking support until then.
+ *
+ * This queue is new, so no concurrency on the queue_flags.
+ */
+ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+ md->saved_make_request_fn = md->queue->make_request_fn;
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-
- md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
- if (!md->io_pool)
- goto bad_io_pool;
-
- md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
- if (!md->tio_pool)
- goto bad_tio_pool;
-
- md->bs = bioset_create(16, 0);
- if (!md->bs)
- goto bad_no_bioset;
+ blk_queue_softirq_done(md->queue, dm_softirq_done);
+ blk_queue_prep_rq(md->queue, dm_prep_fn);
+ blk_queue_lld_busy(md->queue, dm_lld_busy);
md->disk = alloc_disk(1);
if (!md->disk)
@@ -1170,6 +1798,10 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->wq)
goto bad_thread;
+ md->bdev = bdget_disk(md->disk, 0);
+ if (!md->bdev)
+ goto bad_bdev;
+
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
@@ -1179,15 +1811,11 @@ static struct mapped_device *alloc_dev(int minor)
return md;
+bad_bdev:
+ destroy_workqueue(md->wq);
bad_thread:
put_disk(md->disk);
bad_disk:
- bioset_free(md->bs);
-bad_no_bioset:
- mempool_destroy(md->tio_pool);
-bad_tio_pool:
- mempool_destroy(md->io_pool);
-bad_io_pool:
blk_cleanup_queue(md->queue);
bad_queue:
free_minor(minor);
@@ -1204,14 +1832,15 @@ static void free_dev(struct mapped_device *md)
{
int minor = MINOR(disk_devt(md->disk));
- if (md->suspended_bdev) {
- unlock_fs(md);
- bdput(md->suspended_bdev);
- }
+ unlock_fs(md);
+ bdput(md->bdev);
destroy_workqueue(md->wq);
- mempool_destroy(md->tio_pool);
- mempool_destroy(md->io_pool);
- bioset_free(md->bs);
+ if (md->tio_pool)
+ mempool_destroy(md->tio_pool);
+ if (md->io_pool)
+ mempool_destroy(md->io_pool);
+ if (md->bs)
+ bioset_free(md->bs);
blk_integrity_unregister(md->disk);
del_gendisk(md->disk);
free_minor(minor);
@@ -1226,6 +1855,29 @@ static void free_dev(struct mapped_device *md)
kfree(md);
}
+static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+{
+ struct dm_md_mempools *p;
+
+ if (md->io_pool && md->tio_pool && md->bs)
+ /* the md already has necessary mempools */
+ goto out;
+
+ p = dm_table_get_md_mempools(t);
+ BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+
+ md->io_pool = p->io_pool;
+ p->io_pool = NULL;
+ md->tio_pool = p->tio_pool;
+ p->tio_pool = NULL;
+ md->bs = p->bs;
+ p->bs = NULL;
+
+out:
+	/* mempool binding completed; the table no longer needs any mempools */
+ dm_table_free_md_mempools(t);
+}
+
/*
* Bind a table to the device.
*/
@@ -1249,15 +1901,17 @@ static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
- mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
- i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
+ mutex_lock(&md->bdev->bd_inode->i_mutex);
+ i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+ mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
-static int __bind(struct mapped_device *md, struct dm_table *t)
+static int __bind(struct mapped_device *md, struct dm_table *t,
+ struct queue_limits *limits)
{
struct request_queue *q = md->queue;
sector_t size;
+ unsigned long flags;
size = dm_table_get_size(t);
@@ -1267,8 +1921,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
if (size != get_capacity(md->disk))
memset(&md->geometry, 0, sizeof(md->geometry));
- if (md->suspended_bdev)
- __set_size(md, size);
+ __set_size(md, size);
if (!size) {
dm_table_destroy(t);
@@ -1277,10 +1930,22 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
dm_table_event_callback(t, event_callback, md);
- write_lock(&md->map_lock);
+ /*
+	 * If the old table type wasn't request-based, the queue hasn't been
+	 * stopped during suspension yet.  Stop it now to prevent I/O from
+	 * being mapped before resume.
+	 * This must be done before setting the queue restrictions, because
+	 * request-based dm may start running right after they are set.
+ */
+ if (dm_table_request_based(t) && !blk_queue_stopped(q))
+ stop_queue(q);
+
+ __bind_mempools(md, t);
+
+ write_lock_irqsave(&md->map_lock, flags);
md->map = t;
- dm_table_set_restrictions(t, q);
- write_unlock(&md->map_lock);
+ dm_table_set_restrictions(t, q, limits);
+ write_unlock_irqrestore(&md->map_lock, flags);
return 0;
}
@@ -1288,14 +1953,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
static void __unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
+ unsigned long flags;
if (!map)
return;
dm_table_event_callback(map, NULL, NULL);
- write_lock(&md->map_lock);
+ write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
- write_unlock(&md->map_lock);
+ write_unlock_irqrestore(&md->map_lock, flags);
dm_table_destroy(map);
}
@@ -1399,6 +2065,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
int r = 0;
DECLARE_WAITQUEUE(wait, current);
+ struct request_queue *q = md->queue;
+ unsigned long flags;
dm_unplug_all(md->queue);
@@ -1408,7 +2076,14 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
set_current_state(interruptible);
smp_mb();
- if (!atomic_read(&md->pending))
+ if (dm_request_based(md)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!queue_in_flight(q) && blk_queue_stopped(q)) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ } else if (!atomic_read(&md->pending))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
@@ -1426,34 +2101,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
return r;
}
-static int dm_flush(struct mapped_device *md)
+static void dm_flush(struct mapped_device *md)
{
dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
- return 0;
+
+ bio_init(&md->barrier_bio);
+ md->barrier_bio.bi_bdev = md->bdev;
+ md->barrier_bio.bi_rw = WRITE_BARRIER;
+ __split_and_process_bio(md, &md->barrier_bio);
+
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
- int error = dm_flush(md);
-
- if (unlikely(error)) {
- bio_endio(bio, error);
- return;
- }
- if (bio_empty_barrier(bio)) {
- bio_endio(bio, 0);
- return;
- }
-
- __split_and_process_bio(md, bio);
+ md->barrier_error = 0;
- error = dm_flush(md);
+ dm_flush(md);
- if (!error && md->barrier_error)
- error = md->barrier_error;
+ if (!bio_empty_barrier(bio)) {
+ __split_and_process_bio(md, bio);
+ dm_flush(md);
+ }
if (md->barrier_error != DM_ENDIO_REQUEUE)
- bio_endio(bio, error);
+ bio_endio(bio, md->barrier_error);
+ else {
+ spin_lock_irq(&md->deferred_lock);
+ bio_list_add_head(&md->deferred, bio);
+ spin_unlock_irq(&md->deferred_lock);
+ }
}
/*
@@ -1479,10 +2156,14 @@ static void dm_wq_work(struct work_struct *work)
up_write(&md->io_lock);
- if (bio_barrier(c))
- process_barrier(md, c);
- else
- __split_and_process_bio(md, c);
+ if (dm_request_based(md))
+ generic_make_request(c);
+ else {
+ if (bio_barrier(c))
+ process_barrier(md, c);
+ else
+ __split_and_process_bio(md, c);
+ }
down_write(&md->io_lock);
}
@@ -1502,6 +2183,7 @@ static void dm_queue_flush(struct mapped_device *md)
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
+ struct queue_limits limits;
int r = -EINVAL;
mutex_lock(&md->suspend_lock);
@@ -1510,19 +2192,96 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
if (!dm_suspended(md))
goto out;
- /* without bdev, the device size cannot be changed */
- if (!md->suspended_bdev)
- if (get_capacity(md->disk) != dm_table_get_size(table))
- goto out;
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r)
+ goto out;
+
+ /* cannot change the device type, once a table is bound */
+ if (md->map &&
+ (dm_table_get_type(md->map) != dm_table_get_type(table))) {
+ DMWARN("can't change the device type after a table is bound");
+ goto out;
+ }
+
+ /*
+	 * It is enough to call blk_queue_ordered() only once, when the
+	 * first bio-based table is bound.
+ *
+ * This setting should be moved to alloc_dev() when request-based dm
+ * supports barrier.
+ */
+ if (!md->map && dm_table_bio_based(table))
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
__unbind(md);
- r = __bind(md, table);
+ r = __bind(md, table, &limits);
out:
mutex_unlock(&md->suspend_lock);
return r;
}
+static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
+{
+ md->suspend_rq.special = (void *)0x1;
+}
+
+static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
+{
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!noflush)
+ dm_rq_invalidate_suspend_marker(md);
+ __start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
+{
+ struct request *rq = &md->suspend_rq;
+ struct request_queue *q = md->queue;
+
+ if (noflush)
+ stop_queue(q);
+ else {
+ blk_rq_init(q, rq);
+ blk_insert_request(q, rq, 0, NULL);
+ }
+}
+
+static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
+{
+ int r = 1;
+ struct request *rq = &md->suspend_rq;
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ if (noflush)
+ return r;
+
+ /* The marker must be protected by queue lock if it is in use */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (unlikely(rq->ref_count)) {
+ /*
+		 * This can happen when the previous flush suspend was
+		 * interrupted: the marker is still in the queue when this
+		 * flush suspend is invoked, because we don't remove the
+		 * marker at the time of suspend interruption.
+		 * We have only one marker per mapped_device, so we can't
+		 * start another flush suspend while it is in use.
+ */
+ BUG_ON(!rq->special); /* The marker should be invalidated */
+		DMWARN("Invalidation of the previous flush suspend is still"
+		       " in progress. Please retry later.");
+ r = 0;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return r;
+}
+
/*
* Functions to lock and unlock any filesystem running on the
* device.
@@ -1533,7 +2292,7 @@ static int lock_fs(struct mapped_device *md)
WARN_ON(md->frozen_sb);
- md->frozen_sb = freeze_bdev(md->suspended_bdev);
+ md->frozen_sb = freeze_bdev(md->bdev);
if (IS_ERR(md->frozen_sb)) {
r = PTR_ERR(md->frozen_sb);
md->frozen_sb = NULL;
@@ -1542,9 +2301,6 @@ static int lock_fs(struct mapped_device *md)
set_bit(DMF_FROZEN, &md->flags);
- /* don't bdput right now, we don't want the bdev
- * to go away while it is locked.
- */
return 0;
}
@@ -1553,7 +2309,7 @@ static void unlock_fs(struct mapped_device *md)
if (!test_bit(DMF_FROZEN, &md->flags))
return;
- thaw_bdev(md->suspended_bdev, md->frozen_sb);
+ thaw_bdev(md->bdev, md->frozen_sb);
md->frozen_sb = NULL;
clear_bit(DMF_FROZEN, &md->flags);
}
@@ -1565,6 +2321,53 @@ static void unlock_fs(struct mapped_device *md)
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
+/*
+ * Suspend mechanism in request-based dm.
+ *
+ * After the suspend starts, further incoming requests are kept in
+ * the request_queue and deferred.
+ * Requests remaining in the request_queue at the start of suspend are flushed
+ * if this is a flush suspend.
+ * The suspend completes when the following conditions have been satisfied,
+ * so wait for it:
+ * 1. q->in_flight is 0 (which means no in_flight request)
+ * 2. queue has been stopped (which means no request dispatching)
+ *
+ *
+ * Noflush suspend
+ * ---------------
+ * Noflush suspend doesn't need to dispatch remaining requests.
+ * So stop the queue immediately. Then, wait for all in_flight requests
+ * to be completed or requeued.
+ *
+ * To abort noflush suspend, start the queue.
+ *
+ *
+ * Flush suspend
+ * -------------
+ * Flush suspend needs to dispatch remaining requests. So stop the queue
+ * after the remaining requests are completed. (Requeued requests must also be
+ * re-dispatched and completed. Until then, we can't stop the queue.)
+ *
+ * While the remaining requests are being flushed, further incoming requests
+ * are also inserted into the same queue. To distinguish which requests are to
+ * be flushed, we insert a marker request into the queue when flush suspend
+ * starts, like a barrier.
+ * Dispatching is blocked when the marker is found at the top of the queue,
+ * and the queue is stopped when all in_flight requests are completed, since
+ * that means the remaining requests are completely flushed.
+ * Then, the marker is removed from the queue.
+ *
+ * To abort flush suspend, we also need to take care of the marker, not only
+ * starting the queue.
+ * We don't forcibly remove the marker from the queue, since that would go
+ * against block-layer conventions. Instead, we mark the marker as invalidated.
+ * When the invalidated marker is found at the top of the queue, it is
+ * immediately removed from the queue, so it doesn't block dispatching.
+ * Because we have only one marker per mapped_device, we can't start another
+ * flush suspend until the invalidated marker is removed from the queue.
+ * So fail and return with -EBUSY in such a case.
+ */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
struct dm_table *map = NULL;
@@ -1579,6 +2382,11 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
goto out_unlock;
}
+ if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
map = dm_get_table(md);
/*
@@ -1591,24 +2399,14 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
/* This does not get reverted if there's an error later. */
dm_table_presuspend_targets(map);
- /* bdget() can stall if the pending I/Os are not flushed */
- if (!noflush) {
- md->suspended_bdev = bdget_disk(md->disk, 0);
- if (!md->suspended_bdev) {
- DMWARN("bdget failed in dm_suspend");
- r = -ENOMEM;
+ /*
+ * Flush I/O to the device. noflush supersedes do_lockfs,
+ * because lock_fs() needs to flush I/Os.
+ */
+ if (!noflush && do_lockfs) {
+ r = lock_fs(md);
+ if (r)
goto out;
- }
-
- /*
- * Flush I/O to the device. noflush supersedes do_lockfs,
- * because lock_fs() needs to flush I/Os.
- */
- if (do_lockfs) {
- r = lock_fs(md);
- if (r)
- goto out;
- }
}
/*
@@ -1634,6 +2432,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
flush_workqueue(md->wq);
+ if (dm_request_based(md))
+ dm_rq_start_suspend(md, noflush);
+
/*
* At this point no more requests are entering target request routines.
* We call dm_wait_for_completion to wait for all existing requests
@@ -1650,6 +2451,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
if (r < 0) {
dm_queue_flush(md);
+ if (dm_request_based(md))
+ dm_rq_abort_suspend(md, noflush);
+
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
}
@@ -1665,11 +2469,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
set_bit(DMF_SUSPENDED, &md->flags);
out:
- if (r && md->suspended_bdev) {
- bdput(md->suspended_bdev);
- md->suspended_bdev = NULL;
- }
-
dm_table_put(map);
out_unlock:
@@ -1696,21 +2495,20 @@ int dm_resume(struct mapped_device *md)
dm_queue_flush(md);
- unlock_fs(md);
+ /*
+	 * Flushing deferred I/Os must be done after targets are resumed
+	 * so that target mapping works correctly.
+	 * Request-based dm queues the deferred I/Os in its request_queue.
+ */
+ if (dm_request_based(md))
+ start_queue(md->queue);
- if (md->suspended_bdev) {
- bdput(md->suspended_bdev);
- md->suspended_bdev = NULL;
- }
+ unlock_fs(md);
clear_bit(DMF_SUSPENDED, &md->flags);
dm_table_unplug_all(map);
-
- dm_kobject_uevent(md);
-
r = 0;
-
out:
dm_table_put(map);
mutex_unlock(&md->suspend_lock);
@@ -1721,9 +2519,19 @@ out:
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md)
-{
- kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
+void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie)
+{
+ char udev_cookie[DM_COOKIE_LENGTH];
+ char *envp[] = { udev_cookie, NULL };
+
+ if (!cookie)
+ kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ else {
+ snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+ DM_COOKIE_ENV_VAR_NAME, cookie);
+ kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+ }
}
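The cookie variant above lets a caller tag the uevent so userspace can match it to the operation that triggered it. A hedged sketch of a hypothetical call site (the cookie value is assumed to come from userspace; 0 falls back to a plain, untagged event):

	/* Hypothetical notification helper for a table change. */
	static void example_notify_change(struct mapped_device *md, uint32_t cookie)
	{
		dm_kobject_uevent(md, KOBJ_CHANGE, cookie);
	}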
uint32_t dm_next_uevent_seq(struct mapped_device *md)
@@ -1777,6 +2585,10 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
if (&md->kobj != kobj)
return NULL;
+ if (test_bit(DMF_FREEING, &md->flags) ||
+ test_bit(DMF_DELETING, &md->flags))
+ return NULL;
+
dm_get(md);
return md;
}
@@ -1797,6 +2609,61 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+{
+ struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+
+ if (!pools)
+ return NULL;
+
+ pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
+ mempool_create_slab_pool(MIN_IOS, _io_cache) :
+ mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+ if (!pools->io_pool)
+ goto free_pools_and_out;
+
+ pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
+ mempool_create_slab_pool(MIN_IOS, _tio_cache) :
+ mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
+ if (!pools->tio_pool)
+ goto free_io_pool_and_out;
+
+ pools->bs = (type == DM_TYPE_BIO_BASED) ?
+ bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+ if (!pools->bs)
+ goto free_tio_pool_and_out;
+
+ return pools;
+
+free_tio_pool_and_out:
+ mempool_destroy(pools->tio_pool);
+
+free_io_pool_and_out:
+ mempool_destroy(pools->io_pool);
+
+free_pools_and_out:
+ kfree(pools);
+
+ return NULL;
+}
+
+void dm_free_md_mempools(struct dm_md_mempools *pools)
+{
+ if (!pools)
+ return;
+
+ if (pools->io_pool)
+ mempool_destroy(pools->io_pool);
+
+ if (pools->tio_pool)
+ mempool_destroy(pools->tio_pool);
+
+ if (pools->bs)
+ bioset_free(pools->bs);
+
+ kfree(pools);
+}
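dm_alloc_md_mempools() and dm_free_md_mempools() are intended to be driven from the table-load path: the table allocates pools matching its type, __bind_mempools() later moves them onto the mapped_device, and whatever remains is freed. A minimal sketch under that assumption (example_table_set_pools() is a hypothetical setter, not part of this patch):

	static int example_setup_pools(struct dm_table *t, unsigned type)
	{
		struct dm_md_mempools *pools = dm_alloc_md_mempools(type);

		if (!pools)
			return -ENOMEM;

		/* Stashed on the table; consumed by __bind_mempools() at bind time. */
		example_table_set_pools(t, pools);

		return 0;
	}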
+
static struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a31506d93e91..23278ae80f08 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -23,6 +23,13 @@
#define DM_SUSPEND_NOFLUSH_FLAG (1 << 1)
/*
+ * Type of table and mapped_device's mempool
+ */
+#define DM_TYPE_NONE 0
+#define DM_TYPE_BIO_BASED 1
+#define DM_TYPE_REQUEST_BASED 2
+
+/*
* List of devices that a metadevice uses and should open/close.
*/
struct dm_dev_internal {
@@ -32,6 +39,7 @@ struct dm_dev_internal {
};
struct dm_table;
+struct dm_md_mempools;
/*-----------------------------------------------------------------
* Internal table functions.
@@ -41,18 +49,34 @@ void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_calculate_queue_limits(struct dm_table *table,
+ struct queue_limits *limits);
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
+int dm_table_any_busy_target(struct dm_table *t);
+int dm_table_set_type(struct dm_table *t);
+unsigned dm_table_get_type(struct dm_table *t);
+bool dm_table_bio_based(struct dm_table *t);
+bool dm_table_request_based(struct dm_table *t);
+int dm_table_alloc_md_mempools(struct dm_table *t);
+void dm_table_free_md_mempools(struct dm_table *t);
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
/*
* To check the return value from dm_table_find_target().
*/
#define dm_target_is_valid(t) ((t)->table)
+/*
+ * To check whether the target type is request-based or not (bio-based).
+ */
+#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+
/*-----------------------------------------------------------------
* A registry of target types.
*---------------------------------------------------------------*/
@@ -92,9 +116,16 @@ void dm_stripe_exit(void);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
-void dm_kobject_uevent(struct mapped_device *md);
+void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie);
int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);
+/*
+ * Mempool operations
+ */
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type);
+void dm_free_md_mempools(struct dm_md_mempools *pools);
+
#endif
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 15c8b7b25a9b..5810fa906af0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
rdev->sectors = sectors * mddev->chunk_sectors;
}
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09be637d52cb..d4351ff0849f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1756,9 +1756,10 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
__u8 *uuid;
uuid = sb->set_uuid;
- printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
- ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
- KERN_INFO "md: Name: \"%s\" CT:%llu\n",
+ printk(KERN_INFO
+ "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
+ ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+ "md: Name: \"%s\" CT:%llu\n",
le32_to_cpu(sb->major_version),
le32_to_cpu(sb->feature_map),
uuid[0], uuid[1], uuid[2], uuid[3],
@@ -1770,12 +1771,13 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
& MD_SUPERBLOCK_1_TIME_SEC_MASK);
uuid = sb->device_uuid;
- printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
+ printk(KERN_INFO
+ "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
" RO:%llu\n"
- KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
- ":%02x%02x%02x%02x%02x%02x\n"
- KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
- KERN_INFO "md: (MaxDev:%u) \n",
+ "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+ ":%02x%02x%02x%02x%02x%02x\n"
+ "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
+ "md: (MaxDev:%u) \n",
le32_to_cpu(sb->level),
(unsigned long long)le64_to_cpu(sb->size),
le32_to_cpu(sb->raid_disks),
@@ -3573,7 +3575,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3601,7 +3604,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3844,11 +3848,9 @@ static int md_alloc(dev_t dev, char *name)
flush_scheduled_work();
mutex_lock(&disks_mutex);
- if (mddev->gendisk) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -EEXIST;
- }
+ error = -EEXIST;
+ if (mddev->gendisk)
+ goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
@@ -3860,17 +3862,15 @@ static int md_alloc(dev_t dev, char *name)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
- return -EEXIST;
+ goto abort;
}
spin_unlock(&all_mddevs_lock);
}
+ error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mddev->queue) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -ENOMEM;
- }
+ if (!mddev->queue)
+ goto abort;
mddev->queue->queuedata = mddev;
/* Can be unlocked because the queue is new: no concurrency */
@@ -3880,11 +3880,9 @@ static int md_alloc(dev_t dev, char *name)
disk = alloc_disk(1 << shift);
if (!disk) {
- mutex_unlock(&disks_mutex);
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
- mddev_put(mddev);
- return -ENOMEM;
+ goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -3906,16 +3904,22 @@ static int md_alloc(dev_t dev, char *name)
mddev->gendisk = disk;
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
- mutex_unlock(&disks_mutex);
- if (error)
+ if (error) {
+ /* This isn't possible, but as kobject_init_and_add is marked
+ * __must_check, we must do something with the result
+ */
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
- else {
+ error = 0;
+ }
+ abort:
+ mutex_unlock(&disks_mutex);
+ if (!error) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
- return 0;
+ return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -6334,10 +6338,16 @@ void md_do_sync(mddev_t *mddev)
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- if (j >= mddev->resync_max)
- wait_event(mddev->recovery_wait,
- mddev->resync_max > j
- || kthread_should_stop());
+ while (j >= mddev->resync_max && !kthread_should_stop()) {
+ /* As this condition is controlled by user-space,
+ * we can block indefinitely, so use '_interruptible'
+ * to avoid triggering warnings.
+ */
+ flush_signals(current); /* just in case */
+ wait_event_interruptible(mddev->recovery_wait,
+ mddev->resync_max > j
+ || kthread_should_stop());
+ }
if (kthread_should_stop())
goto interrupted;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index cbe368fa6598..237fe3fd235c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (path = first; path <= last; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
q = rdev->bdev->bd_disk->queue;
- blk_queue_stack_limits(mddev->queue, q);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
@@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev)
disk = conf->multipaths + disk_idx;
disk->rdev = rdev;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ab4a489d8695..335f490dcad6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
}
dev[j] = rdev1;
- blk_queue_stack_limits(mddev->queue,
- rdev1->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
mddev->chunk_sectors << 9);
goto abort;
}
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
printk(KERN_INFO "raid0: done.\n");
mddev->private = conf;
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89939a7aef57..0569efba0c02 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (mirror = first; mirror <= last; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..7298a5e5a183 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for ( ; mirror <= last ; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int run(mddev_t *mddev)
{
conf_t *conf;
- int i, disk_idx;
+ int i, disk_idx, chunk_size;
mirror_info_t *disk;
mdk_rdev_t *rdev;
int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
spin_lock_init(&conf->device_lock);
mddev->queue->queue_lock = &conf->device_lock;
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ if (conf->raid_disks % conf->near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+ else
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks / conf->near_copies));
+
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f9f991e6e138..37835538b58e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
goto retry;
}
}
- /* FIXME what if we get a false positive because these
- * are being updated.
- */
- if (logical_sector >= mddev->suspend_lo &&
+
+ if (bio_data_dir(bi) == WRITE &&
+ logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
release_stripe(sh);
- schedule();
+ /* As the suspend_* range is controlled by
+ * userspace, we want an interruptible
+ * wait.
+ */
+ flush_signals(current);
+ prepare_to_wait(&conf->wait_for_overlap,
+ &w, TASK_INTERRUPTIBLE);
+ if (logical_sector >= mddev->suspend_lo &&
+ logical_sector < mddev->suspend_hi)
+ schedule();
goto retry;
}
@@ -4452,7 +4460,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
static int run(mddev_t *mddev)
{
raid5_conf_t *conf;
- int working_disks = 0;
+ int working_disks = 0, chunk_size;
mdk_rdev_t *rdev;
if (mddev->recovery_cp != MaxSector)
@@ -4607,6 +4615,14 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks - conf->max_degraded));
+
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
return 0;
abort:
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index 3fe158ac7bbf..4216328552f6 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -2750,3 +2750,26 @@ IR_KEYTAB_TYPE ir_codes_dm1105_nec[IR_KEYTAB_SIZE] = {
[0x1b] = KEY_B, /*recall*/
};
EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec);
+
+/* EVGA inDtube
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+IR_KEYTAB_TYPE ir_codes_evga_indtube[IR_KEYTAB_SIZE] = {
+ [0x12] = KEY_POWER,
+ [0x02] = KEY_MODE, /* TV */
+ [0x14] = KEY_MUTE,
+ [0x1a] = KEY_CHANNELUP,
+ [0x16] = KEY_TV2, /* PIP */
+ [0x1d] = KEY_VOLUMEUP,
+ [0x05] = KEY_CHANNELDOWN,
+ [0x0f] = KEY_PLAYPAUSE,
+ [0x19] = KEY_VOLUMEDOWN,
+ [0x1c] = KEY_REWIND,
+ [0x0d] = KEY_RECORD,
+ [0x18] = KEY_FORWARD,
+ [0x1e] = KEY_PREVIOUS,
+ [0x1b] = KEY_STOP,
+ [0x1f] = KEY_NEXT,
+ [0x13] = KEY_CAMERA,
+};
+EXPORT_SYMBOL_GPL(ir_codes_evga_indtube);
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index b6da9c3873fe..aa20ce8cc668 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -1096,8 +1096,19 @@ static int xc2028_set_params(struct dvb_frontend *fe,
}
/* All S-code tables need a 200kHz shift */
- if (priv->ctrl.demod)
+ if (priv->ctrl.demod) {
demod = priv->ctrl.demod + 200;
+ /*
+ * The DTV7 S-code table needs a 700 kHz shift.
+ * Thanks to Terry Wu <terrywu2009@gmail.com> for reporting this
+ *
+ * DTV7 is only used in Australia. Germany or Italy may also
+ * use this firmware after initialization, but a tune to a UHF
+ * channel should then cause DTV78 to be used.
+ */
+ if (type & DTV7)
+ demod += 500;
+ }
return generic_set_freq(fe, p->frequency,
T_DIGITAL_TV, type, 0, demod);
diff --git a/drivers/media/dvb/frontends/stv0900.h b/drivers/media/dvb/frontends/stv0900.h
index 8a1332c2031d..bf4e9b633044 100644
--- a/drivers/media/dvb/frontends/stv0900.h
+++ b/drivers/media/dvb/frontends/stv0900.h
@@ -29,6 +29,11 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+struct stv0900_reg {
+ u16 addr;
+ u8 val;
+};
+
struct stv0900_config {
u8 demod_address;
u32 xtal;
@@ -38,7 +43,7 @@ struct stv0900_config {
u8 path1_mode;
u8 path2_mode;
-
+ struct stv0900_reg *ts_config_regs;
u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */
u8 tun2_maddress;
u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 8499bcf7f251..1da045fbb4ef 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -149,31 +149,31 @@ void stv0900_write_reg(struct stv0900_internal *i_params, u16 reg_addr,
dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
}
-u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg_addr)
+u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg)
{
- u8 data[2];
int ret;
- struct i2c_msg i2cmsg = {
- .addr = i_params->i2c_addr,
- .flags = 0,
- .len = 2,
- .buf = data,
+ u8 b0[] = { MSB(reg), LSB(reg) };
+ u8 buf = 0;
+ struct i2c_msg msg[] = {
+ {
+ .addr = i_params->i2c_addr,
+ .flags = 0,
+ .buf = b0,
+ .len = 2,
+ }, {
+ .addr = i_params->i2c_addr,
+ .flags = I2C_M_RD,
+ .buf = &buf,
+ .len = 1,
+ },
};
- data[0] = MSB(reg_addr);
- data[1] = LSB(reg_addr);
-
- ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1);
- if (ret != 1)
- dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
-
- i2cmsg.flags = I2C_M_RD;
- i2cmsg.len = 1;
- ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1);
- if (ret != 1)
- dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
+ ret = i2c_transfer(i_params->i2c_adap, msg, 2);
+ if (ret != 2)
+ dprintk(KERN_ERR "%s: i2c error %d, reg[0x%02x]\n",
+ __func__, ret, reg);
- return data[0];
+ return buf;
}
void extract_mask_pos(u32 label, u8 *mask, u8 *pos)
@@ -712,6 +712,44 @@ static s32 stv0900_carr_get_quality(struct dvb_frontend *fe,
return c_n;
}
+static int stv0900_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct stv0900_state *state = fe->demodulator_priv;
+ struct stv0900_internal *i_params = state->internal;
+ enum fe_stv0900_demod_num demod = state->demod;
+ u8 err_val1, err_val0;
+ s32 err_field1, err_field0;
+ u32 header_err_val = 0;
+
+ *ucblocks = 0x0;
+ if (stv0900_get_standard(fe, demod) == STV0900_DVBS2_STANDARD) {
+ /* DVB-S2 delineator errors count */
+
+		/* retrieve the count of erroneous headers */
+ dmd_reg(err_field0, R0900_P1_BBFCRCKO0,
+ R0900_P2_BBFCRCKO0);
+ dmd_reg(err_field1, R0900_P1_BBFCRCKO1,
+ R0900_P2_BBFCRCKO1);
+
+ err_val1 = stv0900_read_reg(i_params, err_field1);
+ err_val0 = stv0900_read_reg(i_params, err_field0);
+ header_err_val = (err_val1<<8) | err_val0;
+
+		/* retrieve the count of erroneous packets */
+ dmd_reg(err_field0, R0900_P1_UPCRCKO0,
+ R0900_P2_UPCRCKO0);
+ dmd_reg(err_field1, R0900_P1_UPCRCKO1,
+ R0900_P2_UPCRCKO1);
+
+ err_val1 = stv0900_read_reg(i_params, err_field1);
+ err_val0 = stv0900_read_reg(i_params, err_field0);
+ *ucblocks = (err_val1<<8) | err_val0;
+ *ucblocks += header_err_val;
+ }
+
+ return 0;
+}
+
static int stv0900_read_snr(struct dvb_frontend *fe, u16 *snr)
{
*snr = stv0900_carr_get_quality(fe,
@@ -1355,7 +1393,7 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
struct stv0900_state *state = fe->demodulator_priv;
enum fe_stv0900_error error = STV0900_NO_ERROR;
enum fe_stv0900_error demodError = STV0900_NO_ERROR;
- int selosci;
+ int selosci, i;
struct stv0900_inode *temp_int = find_inode(state->i2c_adap,
state->config->demod_address);
@@ -1402,7 +1440,23 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
stv0900_write_bits(state->internal, F0900_P1_ROLLOFF_CONTROL, p_init->rolloff);
stv0900_write_bits(state->internal, F0900_P2_ROLLOFF_CONTROL, p_init->rolloff);
- stv0900_set_ts_parallel_serial(state->internal, p_init->path1_ts_clock, p_init->path2_ts_clock);
+ state->internal->ts_config = p_init->ts_config;
+ if (state->internal->ts_config == NULL)
+ stv0900_set_ts_parallel_serial(state->internal,
+ p_init->path1_ts_clock,
+ p_init->path2_ts_clock);
+ else {
+ for (i = 0; state->internal->ts_config[i].addr != 0xffff; i++)
+ stv0900_write_reg(state->internal,
+ state->internal->ts_config[i].addr,
+ state->internal->ts_config[i].val);
+
+ stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 1);
+ stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 0);
+ stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 1);
+ stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 0);
+ }
+
stv0900_write_bits(state->internal, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress);
switch (p_init->tuner1_adc) {
case 1:
@@ -1882,6 +1936,7 @@ static struct dvb_frontend_ops stv0900_ops = {
.read_ber = stv0900_read_ber,
.read_signal_strength = stv0900_read_signal_strength,
.read_snr = stv0900_read_snr,
+ .read_ucblocks = stv0900_read_ucblocks,
};
struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
@@ -1915,6 +1970,7 @@ struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
init_params.tun1_iq_inversion = STV0900_IQ_NORMAL;
init_params.tuner1_adc = config->tun1_adc;
init_params.path2_ts_clock = config->path2_mode;
+ init_params.ts_config = config->ts_config_regs;
init_params.tun2_maddress = config->tun2_maddress;
init_params.tuner2_adc = config->tun2_adc;
init_params.tun2_iq_inversion = STV0900_IQ_SWAPPED;
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h
index 67dc8ec634e2..5ed7a145c7d3 100644
--- a/drivers/media/dvb/frontends/stv0900_priv.h
+++ b/drivers/media/dvb/frontends/stv0900_priv.h
@@ -271,6 +271,7 @@ struct stv0900_init_params{
/* IQ from the tuner2 to the demod */
enum stv0900_iq_inversion tun2_iq_inversion;
+ struct stv0900_reg *ts_config;
};
struct stv0900_search_params {
@@ -363,6 +364,7 @@ struct stv0900_internal{
u8 i2c_addr;
u8 clkmode;/* 0 for CLKI, 2 for XTALI */
u8 chip_id;
+ struct stv0900_reg *ts_config;
enum fe_stv0900_error errs;
int dmds_used;
};
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 96ef745a2e4e..488bdfb34fb3 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -2674,7 +2674,7 @@ static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_mod
static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
{
- struct stv090x_short_frame_crloop *short_crl;
+ struct stv090x_short_frame_crloop *short_crl = NULL;
s32 index = 0;
u8 aclc = 0x0b;
@@ -2694,10 +2694,13 @@ static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
break;
}
- if (state->dev_ver >= 0x30)
- short_crl = stv090x_s2_short_crl_cut20;
- else if (state->dev_ver >= 0x20)
+ if (state->dev_ver >= 0x30) {
+ /* Cut 3.0 and up */
short_crl = stv090x_s2_short_crl_cut30;
+ } else {
+ /* Cut 2.0 and up: we don't support cuts older than 2.0 */
+ short_crl = stv090x_s2_short_crl_cut20;
+ }
if (state->srate <= 3000000)
aclc = short_crl[index].crl_2;
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 4302c563a6b8..cc8862ce4aae 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -210,6 +210,7 @@ static struct pll_tab {
{ TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_3800, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 },
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 32be382f0e97..a246903c3341 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1422,8 +1422,8 @@ int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
struct smscore_gpio_config *pGpioConfig) {
u32 totalLen;
- u32 TranslatedPinNum;
- u32 GroupNum;
+ u32 TranslatedPinNum = 0;
+ u32 GroupNum = 0;
u32 ElectricChar;
u32 groupCfg;
void *buffer;
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 68eb4493f991..d8d4214fd65f 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -1,5 +1,6 @@
config TTPCI_EEPROM
tristate
+ depends on I2C
default n
config DVB_AV7110
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index 640421ceb24a..46d216329611 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -1200,7 +1200,7 @@ static int si470x_fops_release(struct file *file)
video_unregister_device(radio->videodev);
kfree(radio->buffer);
kfree(radio);
- goto done;
+ goto unlock;
}
/* stop rds reception */
@@ -1213,9 +1213,8 @@ static int si470x_fops_release(struct file *file)
retval = si470x_stop(radio);
usb_autopm_put_interface(radio->intf);
}
-
+unlock:
mutex_unlock(&radio->disconnect_lock);
-
done:
return retval;
}
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 393623818ade..3cd76dddb6aa 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -322,7 +322,9 @@ static int vidioc_g_tuner(struct file *file, void *priv,
v->rangehigh = FREQ_MAX * FREQ_MUL;
v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
if (r->tunchk & TEA5764_TUNCHK_STEREO)
- v->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ v->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ else
+ v->rxsubchans = V4L2_TUNER_SUB_MONO;
v->audmode = tea5764_get_audout_mode(radio);
v->signal = TEA5764_TUNCHK_LEVEL(r->tunchk) * 0xffff / 0xf;
v->afc = TEA5764_TUNCHK_IFCNT(r->tunchk);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 94f440535c64..84b6fc15519d 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -312,6 +312,14 @@ config VIDEO_OV7670
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
+config VIDEO_MT9V011
+ tristate "Micron mt9v011 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Micron
+	  mt9v011 1.3 Mpixel camera. It currently only works with the
+ em28xx driver.
+
config VIDEO_TCM825X
tristate "TCM825x camera sensor support"
depends on I2C && VIDEO_V4L2
@@ -866,9 +874,13 @@ config USB_W9968CF
module will be called w9968cf.
config USB_OV511
- tristate "USB OV511 Camera support"
+ tristate "USB OV511 Camera support (DEPRECATED)"
depends on VIDEO_V4L1
---help---
+	  This driver is DEPRECATED; please use the gspca ov519 module
+	  instead. Note that for ov511 / ov518 support in the gspca module
+	  you need at least version 0.6.0 of libv4l.
+
Say Y here if you want to connect this type of camera to your
computer's USB port. See <file:Documentation/video4linux/ov511.txt>
for more information and for a list of supported cameras.
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 7fb3add1b387..9f2e3214a482 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
+obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index c92a25036f0e..36f2d76006fd 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -198,11 +198,14 @@ static const struct cx18_card_pci_info cx18_pci_mpc718[] = {
static const struct cx18_card cx18_card_mpc718 = {
.type = CX18_CARD_YUAN_MPC718,
- .name = "Yuan MPC718",
- .comment = "Analog video capture works; some audio line in may not.\n",
+ .name = "Yuan MPC718 MiniPCI DVB-T/Analog",
+ .comment = "Experimenters needed for device to work well.\n"
+ "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
- .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL,
+ .hw_muxer = CX18_HW_GPIO_MUX,
+ .hw_all = CX18_HW_418_AV | CX18_HW_TUNER |
+ CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
{ CX18_CARD_INPUT_SVIDEO1, 1,
@@ -211,27 +214,34 @@ static const struct cx18_card cx18_card_mpc718 = {
{ CX18_CARD_INPUT_SVIDEO2, 2,
CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 },
{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 },
- { CX18_CARD_INPUT_COMPOSITE3, 2, CX18_AV_COMPOSITE3 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
- { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 0 },
- { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, 0 },
+ { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
+ { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 },
},
- .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 0 },
.tuners = {
/* XC3028 tuner */
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
+ /* FIXME - the FM radio is just a guess and the driver doesn't use SIF */
+ .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
.ddr = {
- /* Probably Samsung K4D263238G-VC33 memory */
- .chip_config = 0x003,
- .refresh = 0x30c,
- .timing1 = 0x23230b73,
- .timing2 = 0x08,
+ /* Hynix HY5DU283222B DDR RAM */
+ .chip_config = 0x303,
+ .refresh = 0x3bd,
+ .timing1 = 0x36320966,
+ .timing2 = 0x1f,
.tune_lane = 0,
.initial_emrs = 2,
},
+ .gpio_init.initial_value = 0x1,
+ .gpio_init.direction = 0x3,
+ /* FIXME - these GPIOs are just guesses */
+ .gpio_audio_input = { .mask = 0x3,
+ .tuner = 0x1,
+ .linein = 0x3,
+ .radio = 0x1 },
.xceive_pin = 0,
.pci_list = cx18_pci_mpc718,
.i2c = &cx18_i2c_std,
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 8e35c3aed544..5136df198338 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -61,6 +61,8 @@ int cx18_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
switch (qctrl->id) {
/* Standard V4L2 controls */
+ case V4L2_CID_USER_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_HUE:
case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index 6ea3fe623ef4..51a0c33b25b7 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -30,6 +30,10 @@
#include "s5h1409.h"
#include "mxl5005s.h"
#include "zl10353.h"
+
+#include <linux/firmware.h>
+#include "mt352.h"
+#include "mt352_priv.h"
#include "tuner-xc2028.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -38,6 +42,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define CX18_CLOCK_ENABLE2 0xc71024
#define CX18_DMUX_CLK_MASK 0x0080
+/*
+ * CX18_CARD_HVR_1600_ESMT
+ * CX18_CARD_HVR_1600_SAMSUNG
+ */
+
static struct mxl5005s_config hauppauge_hvr1600_tuner = {
.i2c_address = 0xC6 >> 1,
.if_freq = IF_FREQ_5380000HZ,
@@ -65,6 +74,9 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
};
+/*
+ * CX18_CARD_LEADTEK_DVR3100H
+ */
/* Information/confirmation of proper config values provided by Terry Wu */
static struct zl10353_config leadtek_dvr3100h_demod = {
.demod_address = 0x1e >> 1, /* Datasheet suggested straps */
@@ -74,6 +86,121 @@ static struct zl10353_config leadtek_dvr3100h_demod = {
.disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
};
+/*
+ * CX18_CARD_YUAN_MPC718
+ */
+/*
+ * Due to
+ *
+ * 1. an absence of information on how to program the MT352
+ * 2. the Linux mt352 module pushing MT352 initialization off onto us here
+ *
+ * We have to use an init sequence that *you* must extract from the Windows
+ * driver (yuanrap.sys) and which we load as a firmware.
+ *
+ * If someone can provide me with a Zarlink MT352 (Intel CE6352?) Design Manual
+ * with chip programming details, then I can remove this annoyance.
+ */
+static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
+ const struct firmware **fw)
+{
+ struct cx18 *cx = stream->cx;
+ const char *fn = "dvb-cx18-mpc718-mt352.fw";
+ int ret;
+
+ ret = request_firmware(fw, fn, &cx->pci_dev->dev);
+ if (ret)
+ CX18_ERR("Unable to open firmware file %s\n", fn);
+ else {
+ size_t sz = (*fw)->size;
+ if (sz < 2 || sz > 64 || (sz % 2) != 0) {
+ CX18_ERR("Firmware %s has a bad size: %lu bytes\n",
+ fn, (unsigned long) sz);
+ ret = -EILSEQ;
+ release_firmware(*fw);
+ *fw = NULL;
+ }
+ }
+
+ if (ret) {
+ CX18_ERR("The MPC718 board variant with the MT352 DVB-T"
+ "demodualtor will not work without it\n");
+ CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware "
+ "mpc718' if you need the firmware\n");
+ }
+ return ret;
+}
+
+static int yuan_mpc718_mt352_init(struct dvb_frontend *fe)
+{
+ struct cx18_dvb *dvb = container_of(fe->dvb,
+ struct cx18_dvb, dvb_adapter);
+ struct cx18_stream *stream = container_of(dvb, struct cx18_stream, dvb);
+ const struct firmware *fw = NULL;
+ int ret;
+ int i;
+ u8 buf[3];
+
+ ret = yuan_mpc718_mt352_reqfw(stream, &fw);
+ if (ret)
+ return ret;
+
+ /* Loop through all the register-value pairs in the firmware file */
+ for (i = 0; i < fw->size; i += 2) {
+ buf[0] = fw->data[i];
+ /* Intercept a few registers we want to set ourselves */
+ switch (buf[0]) {
+ case TRL_NOMINAL_RATE_0:
+ /* Set our custom OFDM bandwidth in the case below */
+ break;
+ case TRL_NOMINAL_RATE_1:
+ /* 6 MHz: 64/7 * 6/8 / 20.48 * 2^16 = 0x55b6.db6 */
+ /* 7 MHz: 64/7 * 7/8 / 20.48 * 2^16 = 0x6400 */
+ /* 8 MHz: 64/7 * 8/8 / 20.48 * 2^16 = 0x7249.249 */
+ buf[1] = 0x72;
+ buf[2] = 0x49;
+ mt352_write(fe, buf, 3);
+ break;
+ case INPUT_FREQ_0:
+ /* Set our custom IF in the case below */
+ break;
+ case INPUT_FREQ_1:
+ /* 4.56 MHz IF: (20.48 - 4.56)/20.48 * 2^14 = 0x31c0 */
+ buf[1] = 0x31;
+ buf[2] = 0xc0;
+ mt352_write(fe, buf, 3);
+ break;
+ default:
+ /* Pass through the register-value pair from the fw */
+ buf[1] = fw->data[i+1];
+ mt352_write(fe, buf, 2);
+ break;
+ }
+ }
+
+ buf[0] = (u8) TUNER_GO;
+ buf[1] = 0x01; /* Go */
+ mt352_write(fe, buf, 2);
+ release_firmware(fw);
+ return 0;
+}
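The bytes hard-coded in the switch above come straight from the formulas quoted in the comments. A standalone sketch that reproduces the arithmetic with the clocks in units of 10 kHz so everything stays exact in integer math (a hypothetical check program, used only to verify the constants; it is not part of the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t adc = 2048;	/* 20.48 MHz ADC clock, in 10 kHz units */
	const uint64_t ifreq = 456;	/* 4.56 MHz IF from the XC3028 */
	unsigned int bw;

	/* TRL nominal rate = 64/7 * bw/8 / 20.48 * 2^16 */
	for (bw = 6; bw <= 8; bw++) {
		uint64_t trl = 64 * (uint64_t)bw * 100 * 65536 / (7 * 8 * adc);
		printf("%u MHz: TRL_NOMINAL_RATE = 0x%04x\n",
		       bw, (unsigned int)trl);
	}

	/* INPUT_FREQ = (20.48 - 4.56) / 20.48 * 2^14 */
	printf("INPUT_FREQ = 0x%04x\n",
	       (unsigned int)((adc - ifreq) * 16384 / adc));

	/* Prints 0x55b6, 0x6400, 0x7249 and 0x31c0, matching the
	 * register values written in the switch above. */
	return 0;
}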
+
+static struct mt352_config yuan_mpc718_mt352_demod = {
+ .demod_address = 0x1e >> 1,
+ .adc_clock = 20480, /* 20.480 MHz */
+ .if2 = 4560, /* 4.560 MHz */
+ .no_tuner = 1, /* XC3028 is not behind the gate */
+ .demod_init = yuan_mpc718_mt352_init,
+};
+
+static struct zl10353_config yuan_mpc718_zl10353_demod = {
+ .demod_address = 0x1e >> 1, /* Datasheet suggested straps */
+ .if2 = 45600, /* 4.560 MHz IF from the XC3028 */
+ .parallel_ts = 1, /* Not a serial TS */
+ .no_tuner = 1, /* XC3028 is not behind the gate */
+ .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
+};
+
static int dvb_register(struct cx18_stream *stream);
/* Kernel DVB framework calls this when the feed needs to start.
@@ -113,6 +240,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
break;
case CX18_CARD_LEADTEK_DVR3100H:
+ case CX18_CARD_YUAN_MPC718:
default:
/* Assumption - Parallel transport - Signalling
* undefined or default.
@@ -326,6 +454,38 @@ static int dvb_register(struct cx18_stream *stream)
fe->ops.tuner_ops.set_config(fe, &ctrl);
}
break;
+ case CX18_CARD_YUAN_MPC718:
+ /*
+ * TODO
+ * Apparently, these cards could instead have a DiBcom
+ * demod supported by one of the dib7000 drivers
+ */
+ dvb->fe = dvb_attach(mt352_attach,
+ &yuan_mpc718_mt352_demod,
+ &cx->i2c_adap[1]);
+ if (dvb->fe == NULL)
+ dvb->fe = dvb_attach(zl10353_attach,
+ &yuan_mpc718_zl10353_demod,
+ &cx->i2c_adap[1]);
+ if (dvb->fe != NULL) {
+ struct dvb_frontend *fe;
+ struct xc2028_config cfg = {
+ .i2c_adap = &cx->i2c_adap[1],
+ .i2c_addr = 0xc2 >> 1,
+ .ctrl = NULL,
+ };
+ static struct xc2028_ctrl ctrl = {
+ .fname = XC2028_DEFAULT_FIRMWARE,
+ .max_len = 64,
+ .demod = XC3028_FE_ZARLINK456,
+ .type = XC2028_AUTO,
+ };
+
+ fe = dvb_attach(xc2028_attach, dvb->fe, &cfg);
+ if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
+ fe->ops.tuner_ops.set_config(fe, &ctrl);
+ }
+ break;
default:
/* No Digital Tv Support */
break;
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 6a9464079b4c..28f48f41f218 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -1052,22 +1052,13 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
/* Set resolution of the video */
int cx231xx_resolution_set(struct cx231xx *dev)
{
- int width, height;
- u32 hscale, vscale;
- int status = 0;
-
- width = dev->width;
- height = dev->height;
-
- get_scale(dev, width, height, &hscale, &vscale);
-
/* set horzontal scale */
- status = vid_blk_write_word(dev, HSCALE_CTRL, hscale);
+ int status = vid_blk_write_word(dev, HSCALE_CTRL, dev->hscale);
+ if (status)
+ return status;
/* set vertical scale */
- status = vid_blk_write_word(dev, VSCALE_CTRL, vscale);
-
- return status;
+ return vid_blk_write_word(dev, VSCALE_CTRL, dev->vscale);
}
/******************************************************************************
@@ -2055,7 +2046,7 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
{
- int rc;
+ int rc = -1;
u32 ep_mask = -1;
struct pcb_config *pcb_config;
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index a23ae73fe634..609bae6098d3 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -893,9 +893,9 @@ static int check_dev(struct cx231xx *dev)
return 0;
}
-void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale)
+static void get_scale(struct cx231xx *dev,
+ unsigned int width, unsigned int height,
+ unsigned int *hscale, unsigned int *vscale)
{
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
@@ -907,10 +907,6 @@ void get_scale(struct cx231xx *dev,
*vscale = (((unsigned long)maxh) << 12) / height - 4096L;
if (*vscale >= 0x4000)
*vscale = 0x3fff;
-
- dev->hscale = *hscale;
- dev->vscale = *vscale;
-
}
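For reference, the words written to HSCALE_CTRL and VSCALE_CTRL follow the formula in get_scale() above: (max_dimension << 12) / requested_dimension - 4096, clamped to 0x3fff. A small standalone illustration, assuming 720x576 PAL maximums (compute_scale() is illustrative only):

#include <stdio.h>

/* Mirrors the arithmetic in get_scale() above. */
static unsigned int compute_scale(unsigned int max, unsigned int req)
{
	unsigned int s = (((unsigned long)max) << 12) / req - 4096UL;

	if (s >= 0x4000)
		s = 0x3fff;
	return s;
}

int main(void)
{
	printf("720 -> %#x\n", compute_scale(720, 720));	/* 0: no scaling */
	printf("360 -> %#x\n", compute_scale(720, 360));	/* 0x1000: scale by 2 */
	printf("180 -> %#x\n", compute_scale(720, 180));	/* 0x3000: scale by 4 */
	return 0;
}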
/* ------------------------------------------------------------------
@@ -955,8 +951,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- int width = f->fmt.pix.width;
- int height = f->fmt.pix.height;
+ unsigned int width = f->fmt.pix.width;
+ unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
unsigned int hscale, vscale;
@@ -971,17 +967,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
/* width must even because of the YUYV format
height must be even because of interlacing */
- height &= 0xfffe;
- width &= 0xfffe;
-
- if (unlikely(height < 32))
- height = 32;
- if (unlikely(height > maxh))
- height = maxh;
- if (unlikely(width < 48))
- width = 48;
- if (unlikely(width > maxw))
- width = maxw;
+ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
get_scale(dev, width, height, &hscale, &vscale);
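This and the later hunks replace open-coded clamping with v4l_bound_align_image(), which bounds each dimension to its range and aligns it; passing 1 for the alignment arguments appears to be what keeps width and height even here, treating them as power-of-two exponents. A simplified standalone illustration of that behaviour (bound_align_dim() is not the kernel helper, and the rounding details are simplified):

#include <stdio.h>

/*
 * Simplified illustration: bound a dimension to [min, max] and align it
 * down to a multiple of 2^align. Not the kernel implementation.
 */
static unsigned int bound_align_dim(unsigned int x, unsigned int min,
				    unsigned int max, unsigned int align)
{
	unsigned int mask = ~((1U << align) - 1);

	if (x < min)
		x = min;
	if (x > max)
		x = max;
	return x & mask;	/* round down to the alignment */
}

int main(void)
{
	/* A 47x700 request on a 720x576 device, even width and height. */
	printf("width  = %u\n", bound_align_dim(47, 48, 720, 1));	/* 48 */
	printf("height = %u\n", bound_align_dim(700, 32, 576, 1));	/* 576 */
	return 0;
}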
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index e38eb2d425f7..a0f823ac6b8d 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -722,9 +722,6 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input);
int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input);
int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev);
int cx231xx_set_audio_input(struct cx231xx *dev, u8 input);
-void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale);
/* Provided by cx231xx-video.c */
int cx231xx_register_extension(struct cx231xx_ops *dev);
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 8ded52946334..4c8e95853fa3 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -500,6 +500,8 @@ int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params,
int err;
switch (qctrl->id) {
+ case V4L2_CID_MPEG_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_MPEG_STREAM_TYPE:
return v4l2_ctrl_query_fill(qctrl,
V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index e236df23370e..86ac529e62be 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -45,6 +45,7 @@
#include "dibx000_common.h"
#include "zl10353.h"
#include "stv0900.h"
+#include "stv0900_reg.h"
#include "stv6110.h"
#include "lnbh24.h"
#include "cx24116.h"
@@ -242,12 +243,22 @@ static struct tda18271_std_map hauppauge_tda18271_std_map = {
.if_lvl = 6, .rfagc_top = 0x37 },
};
+static struct tda18271_std_map hauppauge_hvr1200_tda18271_std_map = {
+ .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+};
+
static struct tda18271_config hauppauge_tda18271_config = {
.std_map = &hauppauge_tda18271_std_map,
.gate = TDA18271_GATE_ANALOG,
};
static struct tda18271_config hauppauge_hvr1200_tuner_config = {
+ .std_map = &hauppauge_hvr1200_tda18271_std_map,
.gate = TDA18271_GATE_ANALOG,
};
@@ -370,13 +381,25 @@ static struct zl10353_config dvico_fusionhdtv_xc3028 = {
.disable_i2c_gate_ctrl = 1,
};
+static struct stv0900_reg stv0900_ts_regs[] = {
+ { R0900_TSGENERAL, 0x00 },
+ { R0900_P1_TSSPEED, 0x40 },
+ { R0900_P2_TSSPEED, 0x40 },
+ { R0900_P1_TSCFGM, 0xc0 },
+ { R0900_P2_TSCFGM, 0xc0 },
+ { R0900_P1_TSCFGH, 0xe0 },
+ { R0900_P2_TSCFGH, 0xe0 },
+ { R0900_P1_TSCFGL, 0x20 },
+ { R0900_P2_TSCFGL, 0x20 },
+ { 0xffff, 0xff }, /* terminate */
+};
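The table above ends with a {0xffff, 0xff} sentinel instead of carrying an explicit length, so the demodulator driver can walk it until it hits the marker. A minimal sketch of that pattern (write_demod_reg() and the register values here are hypothetical stand-ins, not the stv0900 API):

#include <stdio.h>
#include <stdint.h>

struct reg_init {
	uint16_t addr;
	uint8_t  val;
};

/* Hypothetical stand-in for the demodulator register write. */
static void write_demod_reg(uint16_t addr, uint8_t val)
{
	printf("reg 0x%04x <- 0x%02x\n", (unsigned int)addr, (unsigned int)val);
}

static void apply_reg_table(const struct reg_init *tab)
{
	/* Stop at the 0xffff sentinel instead of passing a length around. */
	for (; tab->addr != 0xffff; tab++)
		write_demod_reg(tab->addr, tab->val);
}

int main(void)
{
	static const struct reg_init demo[] = {
		{ 0xf12e, 0x40 },	/* example entries only */
		{ 0xf22e, 0x40 },
		{ 0xffff, 0xff },	/* terminate */
	};

	apply_reg_table(demo);
	return 0;
}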
+
static struct stv0900_config netup_stv0900_config = {
.demod_address = 0x68,
.xtal = 27000000,
.clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */
.diseqc_mode = 2,/* 2/3 PWM */
- .path1_mode = 2,/*Serial continues clock */
- .path2_mode = 2,/*Serial continues clock */
+ .ts_config_regs = stv0900_ts_regs,
.tun1_maddress = 0,/* 0x60 */
.tun2_maddress = 3,/* 0x63 */
.tun1_adc = 1,/* 1 Vpp */
@@ -440,6 +463,30 @@ static struct xc5000_config mygica_x8506_xc5000_config = {
.if_khz = 5380,
};
+static int cx23885_dvb_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *param)
+{
+ struct cx23885_tsport *port = fe->dvb->priv;
+ struct cx23885_dev *dev = port->dev;
+
+ switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1275:
+ switch (param->u.vsb.modulation) {
+ case VSB_8:
+ cx23885_gpio_clear(dev, GPIO_5);
+ break;
+ case QAM_64:
+ case QAM_256:
+ default:
+ cx23885_gpio_set(dev, GPIO_5);
+ break;
+ }
+ break;
+ }
+ return (port->set_frontend_save) ?
+ port->set_frontend_save(fe, param) : -ENODEV;
+}
+
static int dvb_register(struct cx23885_tsport *port)
{
struct cx23885_dev *dev = port->dev;
@@ -479,6 +526,12 @@ static int dvb_register(struct cx23885_tsport *port)
0x60, &dev->i2c_bus[1].i2c_adap,
&hauppauge_hvr127x_config);
}
+
+ /* FIXME: temporary hack */
+ /* define bridge override to set_frontend */
+ port->set_frontend_save = fe0->dvb.frontend->ops.set_frontend;
+ fe0->dvb.frontend->ops.set_frontend = cx23885_dvb_set_frontend;
+
break;
case CX23885_BOARD_HAUPPAUGE_HVR1255:
i2c_bus = &dev->i2c_bus[0];
@@ -736,7 +789,8 @@ static int dvb_register(struct cx23885_tsport *port)
if (!dvb_attach(lnbh24_attach,
fe0->dvb.frontend,
&i2c_bus->i2c_adap,
- LNBH24_PCL, 0, 0x09))
+ LNBH24_PCL,
+ LNBH24_TTX, 0x09))
printk(KERN_ERR
"No LNBH24 found!\n");
@@ -756,7 +810,8 @@ static int dvb_register(struct cx23885_tsport *port)
if (!dvb_attach(lnbh24_attach,
fe0->dvb.frontend,
&i2c_bus->i2c_adap,
- LNBH24_PCL, 0, 0x0a))
+ LNBH24_PCL,
+ LNBH24_TTX, 0x0a))
printk(KERN_ERR
"No LNBH24 found!\n");
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 66bbd2e71105..70836af3ab48 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -963,15 +963,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 1a2ac518a3f1..214a55e943b7 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -288,6 +288,10 @@ struct cx23885_tsport {
/* Allow a single tsport to have multiple frontends */
u32 num_frontends;
void *port_priv;
+
+ /* FIXME: temporary hack */
+ int (*set_frontend_save) (struct dvb_frontend *,
+ struct dvb_frontend_parameters *);
};
struct cx23885_dev {
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 94b7a52629d0..a5cc1c1fc2d6 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1524,33 +1524,45 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ /* Terry Wu <terrywu2009@gmail.com> */
+ /* TV Audio : set GPIO 2, 18, 19 value to 0, 1, 0 */
+ /* FM Audio : set GPIO 2, 18, 19 value to 0, 0, 0 */
+ /* Line-in Audio : set GPIO 2, 18, 19 value to 0, 1, 1 */
+ /* Mute Audio : set GPIO 2 value to 1 */
[CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = {
- .name = "Winfast TV2000 XP Global",
+ .name = "Leadtek TV2000 XP Global",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
+ .radio_type = TUNER_XC2028,
+ .radio_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
- .gpio0 = 0x0400, /* pin 2:mute = 0 (off?) */
+ .gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
- .gpio2 = 0x0800, /* pin 19:audio = 0 (tv) */
-
+ .gpio2 = 0x0C04, /* pin 18 = 1, pin 19 = 0 */
+ .gpio3 = 0x0000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- .gpio0 = 0x0400, /* probably? or 0x0404 to turn mute on */
+ .gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
- .gpio2 = 0x0808, /* pin 19:audio = 1 (line) */
-
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
} },
.radio = {
.type = CX88_RADIO,
- .gpio0 = 0x004ff,
- .gpio1 = 0x010ff,
- .gpio2 = 0x0ff,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C00, /* pin 18 = 0, pin 19 = 0 */
+ .gpio3 = 0x0000,
},
},
[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
@@ -2438,6 +2450,41 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0x107d,
.subdevice = 0x6654,
.card = CX88_BOARD_WINFAST_DTV1800H,
+ }, {
+ /* PVR2000 PAL Model [107d:6630] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6630,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 PAL Model [107d:6638] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6638,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:6631] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6631,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:6637] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6637,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:663d] */
+ .subvendor = 0x107d,
+ .subdevice = 0x663d,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* DV2000 NTSC Model [107d:6621] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6621,
+ .card = CX88_BOARD_WINFAST_DV2000,
+ }, {
+ /* TV2000 XP Global [107d:6618] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6618,
+ .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
},
};
@@ -2446,12 +2493,6 @@ static const struct cx88_subid cx88_subids[] = {
static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
- /* This is just for the "Winfast 2000XP Expert" board ATM; I don't have data on
- * any others.
- *
- * Byte 0 is 1 on the NTSC board.
- */
-
if (eeprom_data[4] != 0x7d ||
eeprom_data[5] != 0x10 ||
eeprom_data[7] != 0x66) {
@@ -2459,8 +2500,19 @@ static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
return;
}
- core->board.tuner_type = (eeprom_data[6] == 0x13) ?
- TUNER_PHILIPS_FM1236_MK3 : TUNER_PHILIPS_FM1216ME_MK3;
+ /* Terry Wu <terrywu2009@gmail.com> */
+ switch (eeprom_data[6]) {
+ case 0x13: /* SSID 6613 for TV2000 XP Expert NTSC Model */
+ case 0x21: /* SSID 6621 for DV2000 NTSC Model */
+ case 0x31: /* SSID 6631 for PVR2000 NTSC Model */
+ case 0x37: /* SSID 6637 for PVR2000 NTSC Model */
+ case 0x3d: /* SSID 663d for PVR2000 NTSC Model */
+ core->board.tuner_type = TUNER_PHILIPS_FM1236_MK3;
+ break;
+ default:
+ core->board.tuner_type = TUNER_PHILIPS_FM1216ME_MK3;
+ break;
+ }
info_printk(core, "Leadtek Winfast 2000XP Expert config: "
"tuner=%d, eeprom[0]=0x%02x\n",
@@ -2713,7 +2765,6 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
{
/* Board-specific callbacks */
switch (core->boardnr) {
- case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
case CX88_BOARD_GENIATECH_X8000_MT:
case CX88_BOARD_KWORLD_ATSC_120:
@@ -2725,6 +2776,7 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
return cx88_xc3028_winfast1800h_callback(core, command, arg);
}
@@ -2914,6 +2966,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
udelay(1000);
break;
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
/* GPIO 12 (xc3028 tuner reset) */
cx_set(MO_GP1_IO, 0x1010);
@@ -2950,6 +3003,7 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
ctl->demod = XC3028_FE_OREN538;
break;
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
/*
@@ -2993,6 +3047,8 @@ static void cx88_card_setup(struct cx88_core *core)
if (0 == core->i2c_rc)
gdi_eeprom(core, eeprom);
break;
+ case CX88_BOARD_LEADTEK_PVR2000:
+ case CX88_BOARD_WINFAST_DV2000:
case CX88_BOARD_WINFAST2000XP_EXPERT:
if (0 == core->i2c_rc)
leadtek_eeprom(core, eeprom);
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 0ccac702bea4..b12770848c00 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1111,15 +1111,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 16a5af30e9d1..6524b493e033 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -8,6 +8,8 @@ config VIDEO_EM28XX
select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
+ select VIDEO_MT9V011 if VIDEO_HELPER_CHIPS_AUTO
+
---help---
This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 00cc791a9e44..ebd24a25fb85 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -58,6 +58,8 @@ static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
+#define MT9V011_VERSION 0x8243
+
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */
static unsigned long em28xx_devused;
@@ -139,6 +141,24 @@ static struct em28xx_reg_seq kworld_330u_digital[] = {
{ -1, -1, -1, -1},
};
+/* Evga inDtube
+ GPIO0 - Enable digital power (s5h1409) - low to enable
+ GPIO1 - Enable analog power (tvp5150/emp202) - low to enable
+ GPIO4 - xc3028 reset
GPIO3 - s5h1409 reset
+ */
+static struct em28xx_reg_seq evga_indtube_analog[] = {
+ {EM28XX_R08_GPIO, 0x79, 0xff, 60},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq evga_indtube_digital[] = {
+ {EM28XX_R08_GPIO, 0x7a, 0xff, 1},
+ {EM2880_R04_GPO, 0x04, 0xff, 10},
+ {EM2880_R04_GPO, 0x0c, 0xff, 1},
+ { -1, -1, -1, -1},
+};
+
/* Callback for the most boards */
static struct em28xx_reg_seq default_tuner_gpio[] = {
{EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
@@ -173,6 +193,13 @@ static struct em28xx_reg_seq terratec_av350_unmute_gpio[] = {
{EM28XX_R08_GPIO, 0xff, 0xff, 10},
{ -1, -1, -1, -1},
};
+
+static struct em28xx_reg_seq silvercrest_reg_seq[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM28XX_R08_GPIO, 0x01, 0xf7, 10},
+ { -1, -1, -1, -1},
+};
+
/*
* Board definitions
*/
@@ -420,6 +447,18 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
} },
},
+ [EM2820_BOARD_SILVERCREST_WEBCAM] = {
+ .name = "Silvercrest Webcam 1.3mpix",
+ .tuner_type = TUNER_ABSENT,
+ .is_27xx = 1,
+ .decoder = EM28XX_MT9V011,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = 0,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = silvercrest_reg_seq,
+ } },
+ },
[EM2821_BOARD_SUPERCOMP_USB_2] = {
.name = "Supercomp USB 2.0 TV",
.valid = EM28XX_BOARD_NOT_VALIDATED,
@@ -808,7 +847,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
- .dvb_gpio = default_analog,
+ .dvb_gpio = default_digital,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1449,6 +1488,33 @@ struct em28xx_board em28xx_boards[] = {
.gpio = terratec_av350_unmute_gpio,
} },
},
+ [EM2882_BOARD_EVGA_INDTUBE] = {
+ .name = "Evga inDtube",
+ .tuner_type = TUNER_XC2028,
+ .tuner_gpio = default_tuner_gpio,
+ .decoder = EM28XX_TVP5150,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
+ .mts_firmware = 1,
+ .has_dvb = 1,
+ .dvb_gpio = evga_indtube_digital,
+ .ir_codes = ir_codes_evga_indtube,
+ .input = { {
+ .type = EM28XX_VMUX_TELEVISION,
+ .vmux = TVP5150_COMPOSITE0,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = evga_indtube_analog,
+ }, {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = evga_indtube_analog,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = evga_indtube_analog,
+ } },
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1571,6 +1637,7 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = {
{0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF},
{0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
{0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028},
+ {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028},
};
/* I2C devicelist hash table for devices with generic USB IDs */
@@ -1593,6 +1660,11 @@ static unsigned short tvp5150_addrs[] = {
I2C_CLIENT_END
};
+static unsigned short mt9v011_addrs[] = {
+ 0xba >> 1,
+ I2C_CLIENT_END
+};
+
static unsigned short msp3400_addrs[] = {
0x80 >> 1,
0x88 >> 1,
@@ -1632,6 +1704,46 @@ static inline void em28xx_set_model(struct em28xx *dev)
EM28XX_I2C_FREQ_100_KHZ;
}
+/* HINT method: webcam I2C chips
+ *
+ * This method work for webcams with Micron sensors
+ */
+static int em28xx_hint_sensor(struct em28xx *dev)
+{
+ int rc;
+ char *sensor_name;
+ unsigned char cmd;
+ __be16 version_be;
+ u16 version;
+
+ if (dev->model != EM2820_BOARD_UNKNOWN)
+ return 0;
+
+ dev->i2c_client.addr = 0xba >> 1;
+ cmd = 0;
+ i2c_master_send(&dev->i2c_client, &cmd, 1);
+ rc = i2c_master_recv(&dev->i2c_client, (char *)&version_be, 2);
+ if (rc != 2)
+ return -EINVAL;
+
+ version = be16_to_cpu(version_be);
+
+ switch (version) {
+ case MT9V011_VERSION:
+ dev->model = EM2820_BOARD_SILVERCREST_WEBCAM;
+ sensor_name = "mt9v011";
+ break;
+ default:
+ printk("Unknown Sensor 0x%04x\n", be16_to_cpu(version));
+ return -EINVAL;
+ }
+
+ em28xx_errdev("Sensor is %s, assuming that webcam is %s\n",
+ sensor_name, em28xx_boards[dev->model].name);
+
+ return 0;
+}
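The probe above reads a two-byte, big-endian chip version from I2C address 0xba >> 1 and matches it against MT9V011_VERSION. The byte-order handling in isolation (parse_sensor_version() is illustrative; the raw bytes stand in for what a 2-byte i2c_master_recv() would return):

#include <stdio.h>
#include <stdint.h>

#define MT9V011_VERSION 0x8243

/* Combine the two bytes of a 2-byte read, most significant byte first,
 * which is what be16_to_cpu() does with the raw word. Illustrative only. */
static uint16_t parse_sensor_version(const uint8_t raw[2])
{
	return (uint16_t)((raw[0] << 8) | raw[1]);
}

int main(void)
{
	const uint8_t raw[2] = { 0x82, 0x43 };	/* example bytes from the bus */

	if (parse_sensor_version(raw) == MT9V011_VERSION)
		printf("sensor looks like an mt9v011\n");
	return 0;
}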
+
/* Since em28xx_pre_card_setup() requires a proper dev->model,
* this won't work for boards with generic PCI IDs
*/
@@ -1660,7 +1772,10 @@ void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_info("chip ID is em2750\n");
break;
case CHIP_ID_EM2820:
- em28xx_info("chip ID is em2820\n");
+ if (dev->board.is_27xx)
+ em28xx_info("chip is em2710\n");
+ else
+ em28xx_info("chip ID is em2820\n");
break;
case CHIP_ID_EM2840:
em28xx_info("chip ID is em2840\n");
@@ -1834,6 +1949,10 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC2028_DEFAULT_FIRMWARE;
break;
+ case EM2882_BOARD_EVGA_INDTUBE:
+ ctl->demod = XC3028_FE_CHINA;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
default:
ctl->demod = XC3028_FE_OREN538;
}
@@ -2101,7 +2220,17 @@ void em28xx_card_setup(struct em28xx *dev)
case EM2880_BOARD_MSI_DIGIVOX_AD:
if (!em28xx_hint_board(dev))
em28xx_set_model(dev);
+
+ /* In cases where we had to use a board hint, the call to
+ em28xx_set_mode() in em28xx_pre_card_setup() was a no-op,
+ so make the call now so the analog GPIOs are set properly
+ before probing the i2c bus. */
+ em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
break;
+ case EM2820_BOARD_SILVERCREST_WEBCAM:
+ /* FIXME: need to document the registers below */
+ em28xx_write_reg(dev, 0x0d, 0x42);
+ em28xx_write_reg(dev, 0x13, 0x08);
}
if (dev->board.has_snapshot_button)
@@ -2133,6 +2262,10 @@ void em28xx_card_setup(struct em28xx *dev)
v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"tvp5150", "tvp5150", tvp5150_addrs);
+ if (dev->board.decoder == EM28XX_MT9V011)
+ v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "mt9v011", "mt9v011", mt9v011_addrs);
+
if (dev->board.adecoder == EM28XX_TVAUDIO)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"tvaudio", "tvaudio", dev->board.tvaudio_addr);
@@ -2277,6 +2410,8 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
return errCode;
}
+ em28xx_hint_sensor(dev);
+
/* Do board specific init and eeprom reading */
em28xx_card_setup(dev);
@@ -2517,6 +2652,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
retval = em28xx_init_dev(&dev, udev, interface, nr);
if (retval) {
em28xx_devused &= ~(1<<dev->devno);
+ mutex_unlock(&dev->lock);
kfree(dev);
goto err;
}
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index c8d7ce8fbd36..079ab4d563a6 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -648,17 +648,28 @@ int em28xx_capture_start(struct em28xx *dev, int start)
int em28xx_set_outfmt(struct em28xx *dev)
{
int ret;
+ int vinmode, vinctl, outfmt;
+
+ outfmt = dev->format->reg;
+
+ if (dev->board.is_27xx) {
+ vinmode = 0x0d;
+ vinctl = 0x00;
+ } else {
+ vinmode = 0x10;
+ vinctl = 0x11;
+ }
ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT,
- dev->format->reg | 0x20, 0x3f);
+ outfmt | 0x20, 0xff);
if (ret < 0)
- return ret;
+ return ret;
- ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, 0x10);
+ ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, vinmode);
if (ret < 0)
return ret;
- return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, 0x11);
+ return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctl);
}
static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
@@ -695,13 +706,19 @@ static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
{
u8 mode;
/* the em2800 scaler only supports scaling down to 50% */
- if (dev->board.is_em2800)
+
+ if (dev->board.is_27xx) {
+ /* FIXME: Don't use the scaler yet */
+ mode = 0;
+ } else if (dev->board.is_em2800) {
mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
- else {
+ } else {
u8 buf[2];
+
buf[0] = h;
buf[1] = h >> 8;
em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
+
buf[0] = v;
buf[1] = v >> 8;
em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
@@ -720,8 +737,11 @@ int em28xx_resolution_set(struct em28xx *dev)
height = norm_maxh(dev) >> 1;
em28xx_set_outfmt(dev);
+
+
em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
+
return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 563dd2b1c8e9..3da97c32b8fa 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -243,6 +243,14 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
};
+static struct zl10353_config em28xx_terratec_xs_zl10353_xc3028 = {
+ .demod_address = (0x1e >> 1),
+ .no_tuner = 1,
+ .disable_i2c_gate_ctrl = 1,
+ .parallel_ts = 1,
+ .if2 = 45600,
+};
+
#ifdef EM28XX_DRX397XD_SUPPORT
/* [TODO] djh - not sure yet what the device config needs to contain */
static struct drx397xD_config em28xx_drx397xD_with_xc3028 = {
@@ -433,7 +441,6 @@ static int dvb_init(struct em28xx *dev)
}
break;
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
- case EM2880_BOARD_TERRATEC_HYBRID_XS:
case EM2880_BOARD_KWORLD_DVB_310U:
case EM2880_BOARD_EMPIRE_DUAL_TV:
dvb->frontend = dvb_attach(zl10353_attach,
@@ -444,7 +451,27 @@ static int dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2880_BOARD_TERRATEC_HYBRID_XS:
+ dvb->frontend = dvb_attach(zl10353_attach,
+ &em28xx_terratec_xs_zl10353_xc3028,
+ &dev->i2c_adap);
+ if (dvb->frontend == NULL) {
+ /* This board could have either a zl10353 or a mt352.
+ If the chip id isn't for zl10353, try mt352 */
+
+ /* FIXME: make support for mt352 work */
+ printk(KERN_ERR "version of this board with mt352 not "
+ "currently supported\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+ if (attach_xc3028(0x61, dev) < 0) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
+ case EM2882_BOARD_EVGA_INDTUBE:
dvb->frontend = dvb_attach(s5h1409_attach,
&em28xx_s5h1409_with_xc3028,
&dev->i2c_adap);
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 2c86fcf089f5..27e33a287dfc 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -483,7 +483,7 @@ static char *i2c_devs[128] = {
[0xa0 >> 1] = "eeprom",
[0xb0 >> 1] = "tda9874",
[0xb8 >> 1] = "tvp5150a",
- [0xba >> 1] = "tvp5150a",
+ [0xba >> 1] = "webcam sensor or tvp5150a",
[0xc0 >> 1] = "tuner (analog)",
[0xc2 >> 1] = "tuner (analog)",
[0xc4 >> 1] = "tuner (analog)",
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 882796e84dbc..14316c912179 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -90,10 +90,35 @@ MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
/* supported video standards */
static struct em28xx_fmt format[] = {
{
- .name = "16bpp YUY2, 4:2:2, packed",
+ .name = "16 bpp YUY2, 4:2:2, packed",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.reg = EM28XX_OUTFMT_YUV422_Y0UY1V,
+ }, {
+ .name = "16 bpp RGB 565, LE",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .reg = EM28XX_OUTFMT_RGB_16_656,
+ }, {
+ .name = "8 bpp Bayer BGBG..GRGR",
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .depth = 8,
+ .reg = EM28XX_OUTFMT_RGB_8_BGBG,
+ }, {
+ .name = "8 bpp Bayer GRGR..BGBG",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .depth = 8,
+ .reg = EM28XX_OUTFMT_RGB_8_GRGR,
+ }, {
+ .name = "8 bpp Bayer GBGB..RGRG",
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .depth = 8,
+ .reg = EM28XX_OUTFMT_RGB_8_GBGB,
+ }, {
+ .name = "12 bpp YUV411",
+ .fourcc = V4L2_PIX_FMT_YUV411P,
+ .depth = 12,
+ .reg = EM28XX_OUTFMT_YUV411,
},
};
@@ -687,8 +712,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- int width = f->fmt.pix.width;
- int height = f->fmt.pix.height;
+ unsigned int width = f->fmt.pix.width;
+ unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
unsigned int hscale, vscale;
@@ -701,34 +726,24 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
- /* width must even because of the YUYV format
- height must be even because of interlacing */
- height &= 0xfffe;
- width &= 0xfffe;
-
- if (unlikely(height < 32))
- height = 32;
- if (unlikely(height > maxh))
- height = maxh;
- if (unlikely(width < 48))
- width = 48;
- if (unlikely(width > maxw))
- width = maxw;
-
- if (dev->board.is_em2800) {
+ if (dev->board.is_27xx) {
+ /* FIXME: This is the only supported fmt */
+ width = 640;
+ height = 480;
+ } else if (dev->board.is_em2800) {
/* the em2800 can only scale down to 50% */
- if (height % (maxh / 2))
- height = maxh;
- if (width % (maxw / 2))
- width = maxw;
- /* according to empiatech support */
- /* the MaxPacketSize is to small to support */
- /* framesizes larger than 640x480 @ 30 fps */
- /* or 640x576 @ 25 fps. As this would cut */
- /* of a part of the image we prefer */
- /* 360x576 or 360x480 for now */
+ height = height > (3 * maxh / 4) ? maxh : maxh / 2;
+ width = width > (3 * maxw / 4) ? maxw : maxw / 2;
+ /* According to empiatech support the MaxPacketSize is too small
+ * to support framesizes larger than 640x480 @ 30 fps or 640x576
+ * @ 25 fps. As this would cut off a part of the image we prefer
+ * 360x576 or 360x480 for now */
if (width == maxw && height == maxh)
width /= 2;
+ } else {
+ /* width must be even because of the YUYV format
+ height must be even because of interlacing */
+ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
}
get_scale(dev, width, height, &hscale, &vscale);
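On the em2800 only full-size and half-size capture are possible, so the request is now snapped to whichever of the two is closer (anything above three quarters of the maximum rounds up); the later special case that halves a full-size request is omitted here. A small standalone illustration, assuming 720x576 PAL maximums:

#include <stdio.h>

/* Mirrors the em2800 rounding above: requests above 3/4 of the maximum
 * become full size, everything else half size. */
static unsigned int em2800_round(unsigned int req, unsigned int max)
{
	return req > (3 * max / 4) ? max : max / 2;
}

int main(void)
{
	const unsigned int maxw = 720, maxh = 576;	/* assumed PAL limits */

	printf("704x400 -> %ux%u\n",
	       em2800_round(704, maxw), em2800_round(400, maxh));	/* 720x288 */
	printf("400x500 -> %ux%u\n",
	       em2800_round(400, maxw), em2800_round(500, maxh));	/* 360x576 */
	return 0;
}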
@@ -747,13 +762,40 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
+ unsigned width, unsigned height)
+{
+ struct em28xx_fmt *fmt;
+
+ /* FIXME: This is the only supported fmt */
+ if (dev->board.is_27xx) {
+ width = 640;
+ height = 480;
+ }
+
+ fmt = format_by_fourcc(fourcc);
+ if (!fmt)
+ return -EINVAL;
+
+ dev->format = fmt;
+ dev->width = width;
+ dev->height = height;
+
+ /* set new image size */
+ get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
+
+ em28xx_set_alternate(dev);
+ em28xx_resolution_set(dev);
+
+ return 0;
+}
+
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
int rc;
- struct em28xx_fmt *fmt;
rc = check_dev(dev);
if (rc < 0)
@@ -763,12 +805,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
vidioc_try_fmt_vid_cap(file, priv, f);
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt) {
- rc = -EINVAL;
- goto out;
- }
-
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
em28xx_errdev("%s queue busy\n", __func__);
rc = -EBUSY;
@@ -781,16 +817,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
goto out;
}
- /* set new image size */
- dev->width = f->fmt.pix.width;
- dev->height = f->fmt.pix.height;
- dev->format = fmt;
- get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
-
- em28xx_set_alternate(dev);
- em28xx_resolution_set(dev);
-
- rc = 0;
+ rc = em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
+ f->fmt.pix.width, f->fmt.pix.height);
out:
mutex_unlock(&dev->lock);
@@ -1630,11 +1658,6 @@ static int em28xx_v4l2_open(struct file *filp)
filp->private_data = fh;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
- dev->width = norm_maxw(dev);
- dev->height = norm_maxh(dev);
- dev->hscale = 0;
- dev->vscale = 0;
-
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
em28xx_set_alternate(dev);
em28xx_resolution_set(dev);
@@ -1976,15 +1999,14 @@ int em28xx_register_analog_devices(struct em28xx *dev)
/* set default norm */
dev->norm = em28xx_video_template.current_norm;
- dev->width = norm_maxw(dev);
- dev->height = norm_maxh(dev);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
- dev->hscale = 0;
- dev->vscale = 0;
dev->ctl_input = 0;
/* Analog specific initialization */
dev->format = &format[0];
+ em28xx_set_video_format(dev, format[0].fourcc,
+ norm_maxw(dev), norm_maxh(dev));
+
video_mux(dev, dev->ctl_input);
/* Audio defaults */
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 8bf81be1da61..d90fef463764 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -106,6 +106,8 @@
#define EM2860_BOARD_TERRATEC_GRABBY 67
#define EM2860_BOARD_TERRATEC_AV350 68
#define EM2882_BOARD_KWORLD_ATSC_315U 69
+#define EM2882_BOARD_EVGA_INDTUBE 70
+#define EM2820_BOARD_SILVERCREST_WEBCAM 71
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -359,6 +361,7 @@ enum em28xx_decoder {
EM28XX_NODECODER,
EM28XX_TVP5150,
EM28XX_SAA711X,
+ EM28XX_MT9V011,
};
enum em28xx_adecoder {
@@ -387,6 +390,7 @@ struct em28xx_board {
unsigned int max_range_640_480:1;
unsigned int has_dvb:1;
unsigned int has_snapshot_button:1;
+ unsigned int is_27xx:1;
unsigned int valid:1;
unsigned char xclk, i2c_speed;
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index f7e0355ad644..1e89600986c8 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1042,13 +1042,11 @@ static int vidioc_queryctrl(struct file *file, void *priv,
for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
if (gspca_dev->ctrl_dis & (1 << i))
continue;
- if (ctrls->qctrl.id < id)
+ if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
continue;
- if (ctrls != NULL) {
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id
+ if (ctrls && gspca_dev->sd_desc->ctrls[i].qctrl.id
> ctrls->qctrl.id)
- continue;
- }
+ continue;
ctrls = &gspca_dev->sd_desc->ctrls[i];
}
} else {
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 188866ac6cef..2f6e135d94bc 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -50,12 +50,18 @@ static int i2c_detect_tries = 10;
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ __u8 packet_nr;
+
char bridge;
#define BRIDGE_OV511 0
#define BRIDGE_OV511PLUS 1
#define BRIDGE_OV518 2
#define BRIDGE_OV518PLUS 3
#define BRIDGE_OV519 4
+#define BRIDGE_MASK 7
+
+ char invert_led;
+#define BRIDGE_INVERT_LED 8
/* Determined by sensor type */
__u8 sif;
@@ -65,22 +71,25 @@ struct sd {
__u8 colors;
__u8 hflip;
__u8 vflip;
+ __u8 autobrightness;
+ __u8 freq;
__u8 stopped; /* Streaming is temporarily paused */
- __u8 frame_rate; /* current Framerate (OV519 only) */
- __u8 clockdiv; /* clockdiv override for OV519 only */
+ __u8 frame_rate; /* current Framerate */
+ __u8 clockdiv; /* clockdiv override */
char sensor; /* Type of image sensor chip (SEN_*) */
#define SEN_UNKNOWN 0
#define SEN_OV6620 1
#define SEN_OV6630 2
-#define SEN_OV7610 3
-#define SEN_OV7620 4
-#define SEN_OV7640 5
-#define SEN_OV7670 6
-#define SEN_OV76BE 7
-#define SEN_OV8610 8
+#define SEN_OV66308AF 3
+#define SEN_OV7610 4
+#define SEN_OV7620 5
+#define SEN_OV7640 6
+#define SEN_OV7670 7
+#define SEN_OV76BE 8
+#define SEN_OV8610 9
};
/* V4L2 controls supported by the driver */
@@ -94,11 +103,17 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setcolors(struct gspca_dev *gspca_dev);
+static void setautobrightness(struct sd *sd);
+static void setfreq(struct sd *sd);
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -141,7 +156,7 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setcolors,
.get = sd_getcolors,
},
-/* next controls work with ov7670 only */
+/* The flip controls work with ov7670 only */
#define HFLIP_IDX 3
{
{
@@ -172,6 +187,51 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setvflip,
.get = sd_getvflip,
},
+#define AUTOBRIGHT_IDX 5
+ {
+ {
+ .id = V4L2_CID_AUTOBRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Brightness",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AUTOBRIGHT_DEF 1
+ .default_value = AUTOBRIGHT_DEF,
+ },
+ .set = sd_setautobrightness,
+ .get = sd_getautobrightness,
+ },
+#define FREQ_IDX 6
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
+ .step = 1,
+#define FREQ_DEF 0
+ .default_value = FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
+#define OV7670_FREQ_IDX 7
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 3, /* 0: 0, 1: 50Hz, 2: 60Hz, 3: Auto */
+ .step = 1,
+#define OV7670_FREQ_DEF 3
+ .default_value = OV7670_FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
};
static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -187,11 +247,21 @@ static const struct v4l2_pix_format ov519_vga_mode[] = {
.priv = 0},
};
static const struct v4l2_pix_format ov519_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
+ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 8 + 590,
@@ -199,42 +269,118 @@ static const struct v4l2_pix_format ov519_sif_mode[] = {
.priv = 0},
};
+/* Note some of the sizeimage values for the ov511 / ov518 may seem
larger than necessary; however, they need to be this big as the ov511 /
ov518 always fills the entire isoc frame, using 0 padding bytes when
it doesn't have any data. So with low framerates the amount of data
transferred can become quite large (libv4l will remove all the 0 padding
+ in userspace). */
static const struct v4l2_pix_format ov518_vga_mode[] = {
{320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 320,
- .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .sizeimage = 320 * 240 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 640,
- .sizeimage = 640 * 480 * 3 / 8 + 590,
+ .sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ov518_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
{176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 176,
- .sizeimage = 40000,
+ .sizeimage = 70000,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
+ {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
{352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 352,
- .sizeimage = 352 * 288 * 3 / 8 + 590,
+ .sizeimage = 352 * 288 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
+static const struct v4l2_pix_format ov511_vga_mode[] = {
+ {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {640, 480, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 2,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
+static const struct v4l2_pix_format ov511_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
+ {176, 144, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
+ {352, 288, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
/* Registers common to OV511 / OV518 */
+#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
#define R51x_SYS_RESET 0x50
+ /* Reset type flags */
+ #define OV511_RESET_OMNICE 0x08
#define R51x_SYS_INIT 0x53
#define R51x_SYS_SNAP 0x52
#define R51x_SYS_CUST_ID 0x5F
#define R51x_COMP_LUT_BEGIN 0x80
/* OV511 Camera interface register numbers */
+#define R511_CAM_DELAY 0x10
+#define R511_CAM_EDGE 0x11
+#define R511_CAM_PXCNT 0x12
+#define R511_CAM_LNCNT 0x13
+#define R511_CAM_PXDIV 0x14
+#define R511_CAM_LNDIV 0x15
+#define R511_CAM_UV_EN 0x16
+#define R511_CAM_LINE_MODE 0x17
+#define R511_CAM_OPTS 0x18
+
+#define R511_SNAP_FRAME 0x19
+#define R511_SNAP_PXCNT 0x1A
+#define R511_SNAP_LNCNT 0x1B
+#define R511_SNAP_PXDIV 0x1C
+#define R511_SNAP_LNDIV 0x1D
+#define R511_SNAP_UV_EN 0x1E
+#define R511_SNAP_OPTS 0x1F
+
+#define R511_DRAM_FLOW_CTL 0x20
+#define R511_FIFO_OPTS 0x31
+#define R511_I2C_CTL 0x40
#define R511_SYS_LED_CTL 0x55 /* OV511+ only */
-#define OV511_RESET_NOREGS 0x3F /* All but OV511 & regs */
+#define R511_COMP_EN 0x78
+#define R511_COMP_LUT_EN 0x79
/* OV518 Camera interface register numbers */
#define R518_GPIO_OUT 0x56 /* OV518(+) only */
@@ -383,7 +529,7 @@ static const struct ov_i2c_regvals norm_6x20[] = {
{ 0x28, 0x05 },
{ 0x2a, 0x04 }, /* Disable framerate adjust */
/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */
- { 0x2d, 0x99 },
+ { 0x2d, 0x85 },
{ 0x33, 0xa0 }, /* Color Processing Parameter */
{ 0x34, 0xd2 }, /* Max A/D range */
{ 0x38, 0x8b },
@@ -416,7 +562,7 @@ static const struct ov_i2c_regvals norm_6x30[] = {
{ 0x07, 0x2d }, /* Sharpness */
{ 0x0c, 0x20 },
{ 0x0d, 0x20 },
- { 0x0e, 0x20 },
+ { 0x0e, 0xa0 }, /* Was 0x20, bit7 enables a 2x gain which we need */
{ 0x0f, 0x05 },
{ 0x10, 0x9a },
{ 0x11, 0x00 }, /* Pixel clock = fastest */
@@ -558,7 +704,7 @@ static const struct ov_i2c_regvals norm_7620[] = {
{ 0x23, 0x00 },
{ 0x26, 0xa2 },
{ 0x27, 0xea },
- { 0x28, 0x20 },
+ { 0x28, 0x22 }, /* Was 0x20, bit1 enables a 2x gain which we need */
{ 0x29, 0x00 },
{ 0x2a, 0x10 },
{ 0x2b, 0x00 },
@@ -999,13 +1145,128 @@ static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
return ret;
}
+static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value)
+{
+ int rc, retries;
+
+ PDEBUG(D_USBO, "i2c 0x%02x -> [0x%02x]", value, reg);
+
+ /* Three byte write cycle */
+ for (retries = 6; ; ) {
+ /* Select camera register */
+ rc = reg_w(sd, R51x_I2C_SADDR_3, reg);
+ if (rc < 0)
+ return rc;
+
+ /* Write "value" to I2C data port of OV511 */
+ rc = reg_w(sd, R51x_I2C_DATA, value);
+ if (rc < 0)
+ return rc;
+
+ /* Initiate 3-byte write cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x01);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+ if (--retries < 0) {
+ PDEBUG(D_USBO, "i2c write retries exhausted");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int ov511_i2c_r(struct sd *sd, __u8 reg)
+{
+ int rc, value, retries;
+
+ /* Two byte write cycle */
+ for (retries = 6; ; ) {
+ /* Select camera register */
+ rc = reg_w(sd, R51x_I2C_SADDR_2, reg);
+ if (rc < 0)
+ return rc;
+
+ /* Initiate 2-byte write cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x03);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+
+ /* I2C abort */
+ reg_w(sd, R511_I2C_CTL, 0x10);
+
+ if (--retries < 0) {
+ PDEBUG(D_USBI, "i2c write retries exhausted");
+ return -1;
+ }
+ }
+
+ /* Two byte read cycle */
+ for (retries = 6; ; ) {
+ /* Initiate 2-byte read cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x05);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+
+ /* I2C abort */
+ rc = reg_w(sd, R511_I2C_CTL, 0x10);
+ if (rc < 0)
+ return rc;
+
+ if (--retries < 0) {
+ PDEBUG(D_USBI, "i2c read retries exhausted");
+ return -1;
+ }
+ }
+
+ value = reg_r(sd, R51x_I2C_DATA);
+
+ PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, value);
+
+ /* This is needed to make i2c_w() work */
+ rc = reg_w(sd, R511_I2C_CTL, 0x05);
+ if (rc < 0)
+ return rc;
+
+ return value;
+}
/*
* The OV518 I2C I/O procedure is different, hence, this function.
* This is normally only called from i2c_w(). Note that this function
* always succeeds regardless of whether the sensor is present and working.
*/
-static int i2c_w(struct sd *sd,
+static int ov518_i2c_w(struct sd *sd,
__u8 reg,
__u8 value)
{
@@ -1040,7 +1301,7 @@ static int i2c_w(struct sd *sd,
* This is normally only called from i2c_r(). Note that this function
* always succeeds regardless of whether the sensor is present and working.
*/
-static int i2c_r(struct sd *sd, __u8 reg)
+static int ov518_i2c_r(struct sd *sd, __u8 reg)
{
int rc, value;
@@ -1063,6 +1324,34 @@ static int i2c_r(struct sd *sd, __u8 reg)
return value;
}
+static int i2c_w(struct sd *sd, __u8 reg, __u8 value)
+{
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return ov511_i2c_w(sd, reg, value);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ case BRIDGE_OV519:
+ return ov518_i2c_w(sd, reg, value);
+ }
+ return -1; /* Should never happen */
+}
+
+static int i2c_r(struct sd *sd, __u8 reg)
+{
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return ov511_i2c_r(sd, reg);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ case BRIDGE_OV519:
+ return ov518_i2c_r(sd, reg);
+ }
+ return -1; /* Should never happen */
+}
+
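With the bridge differences hidden behind i2c_r() and i2c_w(), a masked register update is an ordinary read-modify-write, which is what the masking helper described in the comment that follows does inside the driver. A self-contained sketch of the pattern (update_bits(), reg_read() and reg_write() are illustrative stand-ins that only model a register file in memory):

#include <stdint.h>

static uint8_t regs[256];	/* fake register file for the example */

static int reg_read(uint8_t reg)
{
	return regs[reg];
}

static int reg_write(uint8_t reg, uint8_t value)
{
	regs[reg] = value;
	return 0;
}

/* Set the bits selected by mask to value, preserving the other bits. */
static int update_bits(uint8_t reg, uint8_t value, uint8_t mask)
{
	int cur = reg_read(reg);

	if (cur < 0)
		return cur;
	return reg_write(reg, ((uint8_t)cur & ~mask) | (value & mask));
}

int main(void)
{
	regs[0x28] = 0x20;
	update_bits(0x28, 0x02, 0x02);	/* set only the 2x gain bit */
	return regs[0x28] == 0x22 ? 0 : 1;
}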
/* Writes bits at positions specified by mask to an I2C reg. Bits that are in
* the same position as 1's in "mask" are cleared and set to "value". Bits
* that are in the same position as 0's in "mask" are preserved, regardless
@@ -1242,7 +1531,6 @@ static int ov8xx0_configure(struct sd *sd)
}
/* Set sensor-specific vars */
-/* sd->sif = 0; already done */
return 0;
}
@@ -1279,15 +1567,13 @@ static int ov7xx0_configure(struct sd *sd)
}
} else if ((rc & 3) == 1) {
/* I don't know what's different about the 76BE yet. */
- if (i2c_r(sd, 0x15) & 1)
+ if (i2c_r(sd, 0x15) & 1) {
PDEBUG(D_PROBE, "Sensor is an OV7620AE");
- else
+ sd->sensor = SEN_OV7620;
+ } else {
PDEBUG(D_PROBE, "Sensor is an OV76BE");
-
- /* OV511+ will return all zero isoc data unless we
- * configure the sensor as a 7620. Someone needs to
- * find the exact reg. setting that causes this. */
- sd->sensor = SEN_OV76BE;
+ sd->sensor = SEN_OV76BE;
+ }
} else if ((rc & 3) == 0) {
/* try to read product id registers */
high = i2c_r(sd, 0x0a);
@@ -1333,7 +1619,6 @@ static int ov7xx0_configure(struct sd *sd)
}
/* Set sensor-specific vars */
-/* sd->sif = 0; already done */
return 0;
}
@@ -1362,13 +1647,14 @@ static int ov6xx0_configure(struct sd *sd)
break;
case 0x01:
sd->sensor = SEN_OV6620;
+ PDEBUG(D_PROBE, "Sensor is an OV6620");
break;
case 0x02:
sd->sensor = SEN_OV6630;
PDEBUG(D_PROBE, "Sensor is an OV66308AE");
break;
case 0x03:
- sd->sensor = SEN_OV6630;
+ sd->sensor = SEN_OV66308AF;
PDEBUG(D_PROBE, "Sensor is an OV66308AF");
break;
case 0x90:
@@ -1391,6 +1677,9 @@ static int ov6xx0_configure(struct sd *sd)
/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
static void ov51x_led_control(struct sd *sd, int on)
{
+ if (sd->invert_led)
+ on = !on;
+
switch (sd->bridge) {
/* OV511 has no LED control */
case BRIDGE_OV511PLUS:
@@ -1406,9 +1695,31 @@ static void ov51x_led_control(struct sd *sd, int on)
}
}
-/* OV518 quantization tables are 8x4 (instead of 8x8) */
-static int ov518_upload_quan_tables(struct sd *sd)
+static int ov51x_upload_quan_tables(struct sd *sd)
{
+ const unsigned char yQuanTable511[] = {
+ 0, 1, 1, 2, 2, 3, 3, 4,
+ 1, 1, 1, 2, 2, 3, 4, 4,
+ 1, 1, 2, 2, 3, 4, 4, 4,
+ 2, 2, 2, 3, 4, 4, 4, 4,
+ 2, 2, 3, 4, 4, 5, 5, 5,
+ 3, 3, 4, 4, 5, 5, 5, 5,
+ 3, 4, 4, 4, 5, 5, 5, 5,
+ 4, 4, 4, 4, 5, 5, 5, 5
+ };
+
+ const unsigned char uvQuanTable511[] = {
+ 0, 2, 2, 3, 4, 4, 4, 4,
+ 2, 2, 2, 4, 4, 4, 4, 4,
+ 2, 2, 3, 4, 4, 4, 4, 4,
+ 3, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4
+ };
+
+ /* OV518 quantization tables are 8x4 (instead of 8x8) */
const unsigned char yQuanTable518[] = {
5, 4, 5, 6, 6, 7, 7, 7,
5, 5, 5, 5, 6, 7, 7, 7,
@@ -1423,14 +1734,23 @@ static int ov518_upload_quan_tables(struct sd *sd)
7, 7, 7, 7, 7, 7, 8, 8
};
- const unsigned char *pYTable = yQuanTable518;
- const unsigned char *pUVTable = uvQuanTable518;
+ const unsigned char *pYTable, *pUVTable;
unsigned char val0, val1;
- int i, rc, reg = R51x_COMP_LUT_BEGIN;
+ int i, size, rc, reg = R51x_COMP_LUT_BEGIN;
PDEBUG(D_PROBE, "Uploading quantization tables");
- for (i = 0; i < 16; i++) {
+ if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS) {
+ pYTable = yQuanTable511;
+ pUVTable = uvQuanTable511;
+ size = 32;
+ } else {
+ pYTable = yQuanTable518;
+ pUVTable = uvQuanTable518;
+ size = 16;
+ }
+
+ for (i = 0; i < size; i++) {
val0 = *pYTable++;
val1 = *pYTable++;
val0 &= 0x0f;
@@ -1445,7 +1765,7 @@ static int ov518_upload_quan_tables(struct sd *sd)
val0 &= 0x0f;
val1 &= 0x0f;
val0 |= val1 << 4;
- rc = reg_w(sd, reg + 16, val0);
+ rc = reg_w(sd, reg + size, val0);
if (rc < 0)
return rc;
@@ -1455,6 +1775,87 @@ static int ov518_upload_quan_tables(struct sd *sd)
return 0;
}
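
The upload loop above packs two 4-bit quantization entries into every byte it writes: the first entry goes into the low nibble, the second into the high nibble, and the UV table starts "size" registers after the Y table. A minimal stand-alone sketch of that packing, with a helper name invented purely for illustration:

	/* Illustrative sketch, not part of the patch: pack 2 * n 4-bit
	 * quantization entries into n bytes the way the loop above does. */
	static void pack_quan_pairs(const unsigned char *table,
				    unsigned char *out, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			unsigned char lo = table[2 * i] & 0x0f;	/* low nibble */
			unsigned char hi = table[2 * i + 1] & 0x0f;	/* high nibble */

			out[i] = lo | (hi << 4);
		}
	}
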
+/* This initializes the OV511/OV511+ and the sensor */
+static int ov511_configure(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int rc;
+
+ /* For 511 and 511+ */
+ const struct ov_regvals init_511[] = {
+ { R51x_SYS_RESET, 0x7f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x7f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x3f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x3d },
+ };
+
+ const struct ov_regvals norm_511[] = {
+ { R511_DRAM_FLOW_CTL, 0x01 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R51x_SYS_SNAP, 0x02 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R511_FIFO_OPTS, 0x1f },
+ { R511_COMP_EN, 0x00 },
+ { R511_COMP_LUT_EN, 0x03 },
+ };
+
+ const struct ov_regvals norm_511_p[] = {
+ { R511_DRAM_FLOW_CTL, 0xff },
+ { R51x_SYS_SNAP, 0x00 },
+ { R51x_SYS_SNAP, 0x02 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R511_FIFO_OPTS, 0xff },
+ { R511_COMP_EN, 0x00 },
+ { R511_COMP_LUT_EN, 0x03 },
+ };
+
+ const struct ov_regvals compress_511[] = {
+ { 0x70, 0x1f },
+ { 0x71, 0x05 },
+ { 0x72, 0x06 },
+ { 0x73, 0x06 },
+ { 0x74, 0x14 },
+ { 0x75, 0x03 },
+ { 0x76, 0x04 },
+ { 0x77, 0x04 },
+ };
+
+ PDEBUG(D_PROBE, "Device custom id %x", reg_r(sd, R51x_SYS_CUST_ID));
+
+ rc = write_regvals(sd, init_511, ARRAY_SIZE(init_511));
+ if (rc < 0)
+ return rc;
+
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ rc = write_regvals(sd, norm_511, ARRAY_SIZE(norm_511));
+ if (rc < 0)
+ return rc;
+ break;
+ case BRIDGE_OV511PLUS:
+ rc = write_regvals(sd, norm_511_p, ARRAY_SIZE(norm_511_p));
+ if (rc < 0)
+ return rc;
+ break;
+ }
+
+ /* Init compression */
+ rc = write_regvals(sd, compress_511, ARRAY_SIZE(compress_511));
+ if (rc < 0)
+ return rc;
+
+ rc = ov51x_upload_quan_tables(sd);
+ if (rc < 0) {
+ PDEBUG(D_ERR, "Error uploading quantization tables");
+ return rc;
+ }
+
+ return 0;
+}
+
/* This initializes the OV518/OV518+ and the sensor */
static int ov518_configure(struct gspca_dev *gspca_dev)
{
@@ -1462,7 +1863,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
int rc;
/* For 518 and 518+ */
- static struct ov_regvals init_518[] = {
+ const struct ov_regvals init_518[] = {
{ R51x_SYS_RESET, 0x40 },
{ R51x_SYS_INIT, 0xe1 },
{ R51x_SYS_RESET, 0x3e },
@@ -1473,7 +1874,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
{ 0x5d, 0x03 },
};
- static struct ov_regvals norm_518[] = {
+ const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -1486,7 +1887,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
{ 0x2f, 0x80 },
};
- static struct ov_regvals norm_518_p[] = {
+ const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -1531,7 +1932,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
break;
}
- rc = ov518_upload_quan_tables(sd);
+ rc = ov51x_upload_quan_tables(sd);
if (rc < 0) {
PDEBUG(D_ERR, "Error uploading quantization tables");
return rc;
@@ -1573,9 +1974,14 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct cam *cam;
int ret = 0;
- sd->bridge = id->driver_info;
+ sd->bridge = id->driver_info & BRIDGE_MASK;
+ sd->invert_led = id->driver_info & BRIDGE_INVERT_LED;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ ret = ov511_configure(gspca_dev);
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
ret = ov518_configure(gspca_dev);
@@ -1634,6 +2040,16 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ if (!sd->sif) {
+ cam->cam_mode = ov511_vga_mode;
+ cam->nmodes = ARRAY_SIZE(ov511_vga_mode);
+ } else {
+ cam->cam_mode = ov511_sif_mode;
+ cam->nmodes = ARRAY_SIZE(ov511_sif_mode);
+ }
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
if (!sd->sif) {
@@ -1655,13 +2071,28 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
}
sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
+ if (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF)
+ sd->contrast = 200; /* The default is too low for the ov6630 */
+ else
+ sd->contrast = CONTRAST_DEF;
sd->colors = COLOR_DEF;
sd->hflip = HFLIP_DEF;
sd->vflip = VFLIP_DEF;
- if (sd->sensor != SEN_OV7670)
- gspca_dev->ctrl_dis = (1 << HFLIP_IDX)
- | (1 << VFLIP_IDX);
+ sd->autobrightness = AUTOBRIGHT_DEF;
+ if (sd->sensor == SEN_OV7670) {
+ sd->freq = OV7670_FREQ_DEF;
+ gspca_dev->ctrl_dis = 1 << FREQ_IDX;
+ } else {
+ sd->freq = FREQ_DEF;
+ gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) |
+ (1 << OV7670_FREQ_IDX);
+ }
+ if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
+ gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX;
+ /* OV8610 Frequency filter control should work but needs testing */
+ if (sd->sensor == SEN_OV8610)
+ gspca_dev->ctrl_dis |= 1 << FREQ_IDX;
+
return 0;
error:
PDEBUG(D_ERR, "OV519 Config failed");
@@ -1680,6 +2111,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
return -EIO;
break;
case SEN_OV6630:
+ case SEN_OV66308AF:
if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
return -EIO;
break;
@@ -1688,6 +2120,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
/* case SEN_OV76BE: */
if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)))
return -EIO;
+ if (i2c_w_mask(sd, 0x0e, 0x00, 0x40))
+ return -EIO;
break;
case SEN_OV7620:
if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
@@ -1709,6 +2143,126 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
+/* Set up the OV511/OV511+ with the given image parameters.
+ *
+ * Do not put any sensor-specific code in here (including I2C I/O functions)
+ */
+static int ov511_mode_init_regs(struct sd *sd)
+{
+ int hsegs, vsegs, packet_size, fps, needed;
+ int interlaced = 0;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
+
+ reg_w(sd, R511_CAM_UV_EN, 0x01);
+ reg_w(sd, R511_SNAP_UV_EN, 0x01);
+ reg_w(sd, R511_SNAP_OPTS, 0x03);
+
+ /* Here I'm assuming that snapshot size == image size.
+ * I hope that's always true. --claudio
+ */
+ hsegs = (sd->gspca_dev.width >> 3) - 1;
+ vsegs = (sd->gspca_dev.height >> 3) - 1;
+
+ reg_w(sd, R511_CAM_PXCNT, hsegs);
+ reg_w(sd, R511_CAM_LNCNT, vsegs);
+ reg_w(sd, R511_CAM_PXDIV, 0x00);
+ reg_w(sd, R511_CAM_LNDIV, 0x00);
+
+ /* YUV420, low pass filter on */
+ reg_w(sd, R511_CAM_OPTS, 0x03);
+
+ /* Snapshot additions */
+ reg_w(sd, R511_SNAP_PXCNT, hsegs);
+ reg_w(sd, R511_SNAP_LNCNT, vsegs);
+ reg_w(sd, R511_SNAP_PXDIV, 0x00);
+ reg_w(sd, R511_SNAP_LNDIV, 0x00);
+
+ /******** Set the framerate ********/
+ if (frame_rate > 0)
+ sd->frame_rate = frame_rate;
+
+ switch (sd->sensor) {
+ case SEN_OV6620:
+ /* No framerate control, doesn't like higher rates yet */
+ sd->clockdiv = 3;
+ break;
+
+ /* Note: once the FIXME's in mode_init_ov_sensor_regs() are fixed
+ for more sensors, we need to do this for them too */
+ case SEN_OV7620:
+ case SEN_OV7640:
+ case SEN_OV76BE:
+ if (sd->gspca_dev.width == 320)
+ interlaced = 1;
+ /* Fall through */
+ case SEN_OV6630:
+ case SEN_OV7610:
+ case SEN_OV7670:
+ switch (sd->frame_rate) {
+ case 30:
+ case 25:
+ /* Not enough bandwidth to do 640x480 @ 30 fps */
+ if (sd->gspca_dev.width != 640) {
+ sd->clockdiv = 0;
+ break;
+ }
+ /* Fall through for 640x480 case */
+ default:
+/* case 20: */
+/* case 15: */
+ sd->clockdiv = 1;
+ break;
+ case 10:
+ sd->clockdiv = 2;
+ break;
+ case 5:
+ sd->clockdiv = 5;
+ break;
+ }
+ if (interlaced) {
+ sd->clockdiv = (sd->clockdiv + 1) * 2 - 1;
+ /* Higher than 10 does not work */
+ if (sd->clockdiv > 10)
+ sd->clockdiv = 10;
+ }
+ break;
+
+ case SEN_OV8610:
+ /* No framerate control ?? */
+ sd->clockdiv = 0;
+ break;
+ }
+
+ /* Check if we have enough bandwidth to disable compression */
+ fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
+ needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
+ /* 1400 is a conservative estimate of the max nr of isoc packets/sec */
+ if (needed > 1400 * packet_size) {
+ /* Enable Y and UV quantization and compression */
+ reg_w(sd, R511_COMP_EN, 0x07);
+ reg_w(sd, R511_COMP_LUT_EN, 0x03);
+ } else {
+ reg_w(sd, R511_COMP_EN, 0x06);
+ reg_w(sd, R511_COMP_LUT_EN, 0x00);
+ }
+
+ reg_w(sd, R51x_SYS_RESET, OV511_RESET_OMNICE);
+ reg_w(sd, R51x_SYS_RESET, 0);
+
+ return 0;
+}
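
As a rough worked example of the compression check above (the packet size is assumed here purely for illustration): with clockdiv 0 and no interlacing, fps = 30 / (0 + 1) + 1 = 31, so a 320x240 YUV420 stream needs 31 * 320 * 240 * 3 / 2 = 3,571,200 bytes/s, while a 961-byte isoc alternate setting allows at most 1400 * 961 = 1,345,400 bytes/s; the driver therefore leaves Y and UV compression enabled (R511_COMP_EN = 0x07).
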
+
/* Sets up the OV518/OV518+ with the given image parameters
*
* OV518 needs a completely different approach, until we can figure out what
@@ -1718,7 +2272,19 @@ static int sd_init(struct gspca_dev *gspca_dev)
*/
static int ov518_mode_init_regs(struct sd *sd)
{
- int hsegs, vsegs;
+ int hsegs, vsegs, packet_size;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
/******** Set the mode ********/
@@ -1755,20 +2321,30 @@ static int ov518_mode_init_regs(struct sd *sd)
/* Windows driver does this here; who knows why */
reg_w(sd, 0x2f, 0x80);
- /******** Set the framerate (to 30 FPS) ********/
- if (sd->bridge == BRIDGE_OV518PLUS)
- sd->clockdiv = 1;
- else
- sd->clockdiv = 0;
+ /******** Set the framerate ********/
+ sd->clockdiv = 1;
/* Mode independent, but framerate dependent, regs */
- reg_w(sd, 0x51, 0x04); /* Clock divider; lower==faster */
+ /* 0x51: Clock divider; Only works on some cams which use 2 crystals */
+ reg_w(sd, 0x51, 0x04);
reg_w(sd, 0x22, 0x18);
reg_w(sd, 0x23, 0xff);
- if (sd->bridge == BRIDGE_OV518PLUS)
- reg_w(sd, 0x21, 0x19);
- else
+ if (sd->bridge == BRIDGE_OV518PLUS) {
+ switch (sd->sensor) {
+ case SEN_OV7620:
+ if (sd->gspca_dev.width == 320) {
+ reg_w(sd, 0x20, 0x00);
+ reg_w(sd, 0x21, 0x19);
+ } else {
+ reg_w(sd, 0x20, 0x60);
+ reg_w(sd, 0x21, 0x1f);
+ }
+ break;
+ default:
+ reg_w(sd, 0x21, 0x19);
+ }
+ } else
reg_w(sd, 0x71, 0x17); /* Compression-related? */
/* FIXME: Sensor-specific */
@@ -1879,7 +2455,11 @@ static int ov519_mode_init_regs(struct sd *sd)
reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4);
reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3);
- reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
+ if (sd->sensor == SEN_OV7670 &&
+ sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
+ reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
+ else
+ reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
reg_w(sd, OV519_R13_X_OFFSETH, 0x00);
reg_w(sd, OV519_R14_Y_OFFSETL, 0x00);
reg_w(sd, OV519_R15_Y_OFFSETH, 0x00);
@@ -1971,7 +2551,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
int qvga;
gspca_dev = &sd->gspca_dev;
- qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
+ qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
/******** Mode (VGA/QVGA) and sensor specific regs ********/
switch (sd->sensor) {
@@ -1983,21 +2563,16 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
break;
case SEN_OV7620:
-/* i2c_w(sd, 0x2b, 0x00); */
+ case SEN_OV76BE:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
- i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
+ i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0);
i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
break;
- case SEN_OV76BE:
-/* i2c_w(sd, 0x2b, 0x00); */
- i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
- break;
case SEN_OV7640:
-/* i2c_w(sd, 0x2b, 0x00); */
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
/* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */
@@ -2016,6 +2591,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
break;
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
break;
default:
@@ -2023,10 +2599,6 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
}
/******** Palette-specific regs ********/
- if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) {
- /* not valid on the OV6620/OV7620/6630? */
- i2c_w_mask(sd, 0x0e, 0x00, 0x40);
- }
/* The OV518 needs special treatment. Although both the OV518
* and the OV6630 support a 16-bit video bus, only the 8 bit Y
@@ -2036,25 +2608,12 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
/* OV7640 is 8-bit only */
- if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV7640)
+ if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV66308AF &&
+ sd->sensor != SEN_OV7640)
i2c_w_mask(sd, 0x13, 0x00, 0x20);
/******** Clock programming ********/
- /* The OV6620 needs special handling. This prevents the
- * severe banding that normally occurs */
- if (sd->sensor == SEN_OV6620) {
-
- /* Clock down */
- i2c_w(sd, 0x2a, 0x04);
- i2c_w(sd, 0x11, sd->clockdiv);
- i2c_w(sd, 0x2a, 0x84);
- /* This next setting is critical. It seems to improve
- * the gain or the contrast. The "reserved" bits seem
- * to have some effect in this case. */
- i2c_w(sd, 0x2d, 0x85);
- } else {
- i2c_w(sd, 0x11, sd->clockdiv);
- }
+ i2c_w(sd, 0x11, sd->clockdiv);
/******** Special Features ********/
/* no evidence this is possible with OV7670, either */
@@ -2098,13 +2657,14 @@ static void sethvflip(struct sd *sd)
static int set_ov_sensor_window(struct sd *sd)
{
struct gspca_dev *gspca_dev;
- int qvga;
+ int qvga, crop;
int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale;
int ret, hstart, hstop, vstop, vstart;
__u8 v;
gspca_dev = &sd->gspca_dev;
- qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
+ qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
+ crop = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 2;
/* The different sensor ICs handle setting up of window differently.
* IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */
@@ -2123,14 +2683,19 @@ static int set_ov_sensor_window(struct sd *sd)
break;
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
hwsbase = 0x38;
hwebase = 0x3a;
vwsbase = 0x05;
vwebase = 0x06;
- if (qvga) {
+ if (sd->sensor == SEN_OV66308AF && qvga)
/* HDG: this fixes U and V getting swapped */
- hwsbase--;
- vwsbase--;
+ hwsbase++;
+ if (crop) {
+ hwsbase += 8;
+ hwebase += 8;
+ vwsbase += 11;
+ vwebase += 11;
}
break;
case SEN_OV7620:
@@ -2155,6 +2720,7 @@ static int set_ov_sensor_window(struct sd *sd)
switch (sd->sensor) {
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
if (qvga) { /* QCIF */
hwscale = 0;
vwscale = 0;
@@ -2207,7 +2773,7 @@ static int set_ov_sensor_window(struct sd *sd)
if (qvga) { /* QVGA from ov7670.c by
* Jonathan Corbet */
hstart = 164;
- hstop = 20;
+ hstop = 28;
vstart = 14;
vstop = 494;
} else { /* VGA */
@@ -2233,7 +2799,6 @@ static int set_ov_sensor_window(struct sd *sd)
msleep(10); /* need to sleep between read and write to
* same reg! */
i2c_w(sd, OV7670_REG_VREF, v);
- sethvflip(sd);
} else {
i2c_w(sd, 0x17, hwsbase);
i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale));
@@ -2250,6 +2815,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
int ret = 0;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ ret = ov511_mode_init_regs(sd);
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
ret = ov518_mode_init_regs(sd);
@@ -2268,6 +2837,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
setcontrast(gspca_dev);
setbrightness(gspca_dev);
setcolors(gspca_dev);
+ sethvflip(sd);
+ setautobrightness(sd);
+ setfreq(sd);
ret = ov51x_restart(sd);
if (ret < 0)
@@ -2287,23 +2859,88 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
ov51x_led_control(sd, 0);
}
-static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
+static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
struct gspca_frame *frame, /* target */
- __u8 *data, /* isoc packet */
+ __u8 *in, /* isoc packet */
int len) /* iso packet length */
{
- PDEBUG(D_STREAM, "ov518_pkt_scan: %d bytes", len);
+ struct sd *sd = (struct sd *) gspca_dev;
- if (len & 7) {
- len--;
- PDEBUG(D_STREAM, "packet number: %d\n", (int)data[len]);
+ /* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th
+ * byte non-zero. The EOF packet has image width/height in the
+ * 10th and 11th bytes. The 9th byte is given as follows:
+ *
+ * bit 7: EOF
+ * 6: compression enabled
+ * 5: 422/420/400 modes
+ * 4: 422/420/400 modes
+ * 3: 1
+ * 2: snapshot button on
+ * 1: snapshot frame
+ * 0: even/odd field
+ */
+ if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) &&
+ (in[8] & 0x08)) {
+ if (in[8] & 0x80) {
+ /* Frame end */
+ if ((in[9] + 1) * 8 != gspca_dev->width ||
+ (in[10] + 1) * 8 != gspca_dev->height) {
+ PDEBUG(D_ERR, "Invalid frame size, got: %dx%d,"
+ " requested: %dx%d\n",
+ (in[9] + 1) * 8, (in[10] + 1) * 8,
+ gspca_dev->width, gspca_dev->height);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+ /* Add 11 byte footer to frame, might be useful */
+ gspca_frame_add(gspca_dev, LAST_PACKET, frame, in, 11);
+ return;
+ } else {
+ /* Frame start */
+ gspca_frame_add(gspca_dev, FIRST_PACKET, frame, in, 0);
+ sd->packet_nr = 0;
+ }
}
+ /* Ignore the packet number */
+ len--;
+
+ /* intermediate packet */
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame, in, len);
+}
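
The bit layout documented in the comment above lends itself to a small decoder; the sketch below is only illustrative, and the struct and helper names are invented here rather than taken from the driver:

	/* Illustrative sketch, not part of the patch: decode the 9th byte of
	 * an OV511 SOF/EOF packet per the bit layout documented above. */
	struct ov511_hdr {
		int eof;		/* bit 7: end of frame */
		int compressed;		/* bit 6: compression enabled */
		int snapshot_button;	/* bit 2: snapshot button held */
		int even_field;		/* bit 0: even/odd field */
	};

	static void ov511_decode_hdr(const __u8 *in, struct ov511_hdr *hdr)
	{
		hdr->eof = !!(in[8] & 0x80);
		hdr->compressed = !!(in[8] & 0x40);
		hdr->snapshot_button = !!(in[8] & 0x04);
		hdr->even_field = !!(in[8] & 0x01);
		/* On EOF, in[9] and in[10] hold (width / 8 - 1) and
		 * (height / 8 - 1), as checked in ov511_pkt_scan(). */
	}
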
+
+static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
+ struct gspca_frame *frame, /* target */
+ __u8 *data, /* isoc packet */
+ int len) /* iso packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
/* A false positive here is likely, until OVT gives me
* the definitive SOF/EOF format */
if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0);
+ sd->packet_nr = 0;
+ }
+
+ if (gspca_dev->last_packet_type == DISCARD_PACKET)
+ return;
+
+ /* Does this device use packet numbers ? */
+ if (len & 7) {
+ len--;
+ if (sd->packet_nr == data[len])
+ sd->packet_nr++;
+ /* The last few packets of the frame (which are all 0's,
+ except that they may contain part of the footer) are
+ numbered 0 */
+ else if (sd->packet_nr == 0 || data[len]) {
+ PDEBUG(D_ERR, "Invalid packet nr: %d (expect: %d)",
+ (int)data[len], (int)sd->packet_nr);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
}
/* intermediate packet */
@@ -2364,6 +3001,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
switch (sd->bridge) {
case BRIDGE_OV511:
case BRIDGE_OV511PLUS:
+ ov511_pkt_scan(gspca_dev, frame, data, len);
break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
@@ -2389,13 +3027,13 @@ static void setbrightness(struct gspca_dev *gspca_dev)
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
case SEN_OV7640:
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7620:
/* 7620 doesn't like manual changes when in auto mode */
-/*fixme
- * if (!sd->auto_brt) */
+ if (!sd->autobrightness)
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7670:
@@ -2418,6 +3056,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
i2c_w(sd, OV7610_REG_CNT, val);
break;
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f);
break;
case SEN_OV8610: {
@@ -2462,6 +3101,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w(sd, OV7610_REG_SAT, val);
break;
case SEN_OV7620:
@@ -2482,6 +3122,72 @@ static void setcolors(struct gspca_dev *gspca_dev)
}
}
+static void setautobrightness(struct sd *sd)
+{
+ if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
+ return;
+
+ i2c_w_mask(sd, 0x2d, sd->autobrightness ? 0x10 : 0x00, 0x10);
+}
+
+static void setfreq(struct sd *sd)
+{
+ if (sd->sensor == SEN_OV7670) {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_BFILT);
+ break;
+ case 1: /* 50 hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, 0x08, 0x18);
+ break;
+ case 2: /* 60 hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, 0x00, 0x18);
+ break;
+ case 3: /* Auto hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, OV7670_COM11_HZAUTO,
+ 0x18);
+ break;
+ }
+ } else {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w_mask(sd, 0x2d, 0x00, 0x04);
+ i2c_w_mask(sd, 0x2a, 0x00, 0x80);
+ break;
+ case 1: /* 50 hz (filter on and framerate adj) */
+ i2c_w_mask(sd, 0x2d, 0x04, 0x04);
+ i2c_w_mask(sd, 0x2a, 0x80, 0x80);
+ /* 20 fps -> 16.667 fps */
+ if (sd->sensor == SEN_OV6620 ||
+ sd->sensor == SEN_OV6630 ||
+ sd->sensor == SEN_OV66308AF)
+ i2c_w(sd, 0x2b, 0x5e);
+ else
+ i2c_w(sd, 0x2b, 0xac);
+ break;
+ case 2: /* 60 hz (filter on, ...) */
+ i2c_w_mask(sd, 0x2d, 0x04, 0x04);
+ if (sd->sensor == SEN_OV6620 ||
+ sd->sensor == SEN_OV6630 ||
+ sd->sensor == SEN_OV66308AF) {
+ /* 20 fps -> 15 fps */
+ i2c_w_mask(sd, 0x2a, 0x80, 0x80);
+ i2c_w(sd, 0x2b, 0xa8);
+ } else {
+ /* no framerate adj. */
+ i2c_w_mask(sd, 0x2a, 0x00, 0x80);
+ }
+ break;
+ }
+ }
+}
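
As a short worked example of the writes above: on an OV7670 at the "Auto" setting, COM8 gets its banding-filter bit (OV7670_COM8_BFILT) set and COM11 gets OV7670_COM11_HZAUTO within the 0x18 mask; on the other sensors at 50 Hz, bit 2 of register 0x2d and bit 7 of 0x2a are set, and on the OV6620/OV6630 family register 0x2b is written 0x5e to pull the frame rate from 20 fps down to 16.667 fps, presumably so the frame period becomes a multiple of the 50 Hz mains period.
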
+
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -2572,6 +3278,71 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->autobrightness = val;
+ if (gspca_dev->streaming)
+ setautobrightness(sd);
+ return 0;
+}
+
+static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->autobrightness;
+ return 0;
+}
+
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freq = val;
+ if (gspca_dev->streaming)
+ setfreq(sd);
+ return 0;
+}
+
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freq;
+ return 0;
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ case 3:
+ if (sd->sensor != SEN_OV7670)
+ return -EINVAL;
+
+ strcpy((char *) menu->name, "Automatic");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
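
The menu filled in by sd_querymenu() above can be enumerated from user space with the standard VIDIOC_QUERYMENU ioctl; a rough sketch, in which the device path and helper name are placeholders:

	/* Illustrative userspace sketch, not part of the patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int dump_freq_menu(const char *dev)
	{
		struct v4l2_querymenu menu;
		int i, fd = open(dev, O_RDWR);

		if (fd < 0)
			return -1;
		for (i = 0; i <= 3; i++) {
			memset(&menu, 0, sizeof(menu));
			menu.id = V4L2_CID_POWER_LINE_FREQUENCY;
			menu.index = i;
			if (ioctl(fd, VIDIOC_QUERYMENU, &menu) == 0)
				printf("%d: %s\n", i, (char *)menu.name);
		}
		close(fd);
		return 0;
	}
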
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -2582,6 +3353,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -2590,17 +3362,22 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4064),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x041e, 0x4068),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
{USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
{USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0xa511), .driver_info = BRIDGE_OV511PLUS },
{USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS },
+ {USB_DEVICE(0x0813, 0x0002), .driver_info = BRIDGE_OV511PLUS },
{}
};
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index dc6a6f11354a..0d02f41fa7d0 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -46,6 +46,7 @@ struct sd {
u8 gamma;
u8 vflip; /* ov7630/ov7648 only */
u8 infrared; /* mt9v111 only */
+ u8 freq; /* ov76xx only */
u8 quality; /* image quality */
#define QUALITY_MIN 60
#define QUALITY_MAX 95
@@ -96,8 +97,11 @@ static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static struct ctrl sd_ctrls[] = {
+#define BRIGHTNESS_IDX 0
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -113,6 +117,7 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setbrightness,
.get = sd_getbrightness,
},
+#define CONTRAST_IDX 1
{
{
.id = V4L2_CID_CONTRAST,
@@ -128,20 +133,22 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setcontrast,
.get = sd_getcontrast,
},
+#define COLOR_IDX 2
{
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
+ .name = "Saturation",
.minimum = 0,
.maximum = 40,
.step = 1,
-#define COLOR_DEF 32
+#define COLOR_DEF 25
.default_value = COLOR_DEF,
},
.set = sd_setcolors,
.get = sd_getcolors,
},
+#define BLUE_BALANCE_IDX 3
{
{
.id = V4L2_CID_BLUE_BALANCE,
@@ -156,6 +163,7 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setblue_balance,
.get = sd_getblue_balance,
},
+#define RED_BALANCE_IDX 4
{
{
.id = V4L2_CID_RED_BALANCE,
@@ -170,6 +178,7 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setred_balance,
.get = sd_getred_balance,
},
+#define GAMMA_IDX 5
{
{
.id = V4L2_CID_GAMMA,
@@ -184,7 +193,7 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setgamma,
.get = sd_getgamma,
},
-#define AUTOGAIN_IDX 5
+#define AUTOGAIN_IDX 6
{
{
.id = V4L2_CID_AUTOGAIN,
@@ -200,7 +209,7 @@ static struct ctrl sd_ctrls[] = {
.get = sd_getautogain,
},
/* ov7630/ov7648 only */
-#define VFLIP_IDX 6
+#define VFLIP_IDX 7
{
{
.id = V4L2_CID_VFLIP,
@@ -209,14 +218,14 @@ static struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define VFLIP_DEF 0 /* vflip def = 1 for ov7630 */
+#define VFLIP_DEF 0
.default_value = VFLIP_DEF,
},
.set = sd_setvflip,
.get = sd_getvflip,
},
/* mt9v111 only */
-#define INFRARED_IDX 7
+#define INFRARED_IDX 8
{
{
.id = V4L2_CID_INFRARED,
@@ -231,28 +240,44 @@ static struct ctrl sd_ctrls[] = {
.set = sd_setinfrared,
.get = sd_getinfrared,
},
+/* ov7630/ov7648/ov7660 only */
+#define FREQ_IDX 9
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
+ .step = 1,
+#define FREQ_DEF 2
+ .default_value = FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
};
/* table of the disabled controls */
static __u32 ctrl_dis[] = {
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_HV7131R 0 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MI0360 1 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MO4000 2 */
- (1 << VFLIP_IDX),
+ (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MT9V111 3 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_OM6802 4 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX),
+ (1 << INFRARED_IDX),
/* SENSOR_OV7630 5 */
(1 << INFRARED_IDX),
/* SENSOR_OV7648 6 */
(1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
/* SENSOR_OV7660 7 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
- /* SENSOR_SP80708 8 */
+ (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX), /* SENSOR_SP80708 8 */
};
static const struct v4l2_pix_format vga_mode[] = {
@@ -268,7 +293,8 @@ static const struct v4l2_pix_format vga_mode[] = {
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640,
- .sizeimage = 640 * 480 * 3 / 8 + 590,
+ /* Note 3 / 8 is not large enough, not even 5 / 8 is ?! */
+ .sizeimage = 640 * 480 * 3 / 4 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
@@ -604,7 +630,9 @@ static const u8 ov7630_sensor_init[][8] = {
/* win: i2c_r from 00 to 80 */
{0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10},
{0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10},
- {0xd1, 0x21, 0x11, 0x00, 0x48, 0xc0, 0x00, 0x10},
+/* HDG: 0x11 was 0x00, changed to 0x01 for better exposure (15 fps instead of 30);
+ 0x13 was 0xc0, changed to 0xc3 for auto gain and exposure */
+ {0xd1, 0x21, 0x11, 0x01, 0x48, 0xc3, 0x00, 0x10},
{0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10},
{0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
{0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
@@ -638,9 +666,8 @@ static const u8 ov7630_sensor_init[][8] = {
{0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10},
/* */
- {0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10},
- {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10},
- {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10},
+/* {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
+/* {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
/* */
{0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10},
/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */
@@ -673,7 +700,7 @@ static const u8 ov7648_sensor_init[][8] = {
{0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10},
/* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */
/* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */
- {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10},
+/* {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, set by setfreq */
/*...*/
/* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */
/* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN
@@ -1294,11 +1321,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->gamma = GAMMA_DEF;
sd->autogain = AUTOGAIN_DEF;
sd->ag_cnt = -1;
- if (sd->sensor != SENSOR_OV7630)
- sd->vflip = 0;
- else
- sd->vflip = 1;
+ sd->vflip = VFLIP_DEF;
sd->infrared = INFRARED_DEF;
+ sd->freq = FREQ_DEF;
sd->quality = QUALITY_DEF;
sd->jpegqual = 80;
@@ -1569,7 +1594,7 @@ static void setautogain(struct gspca_dev *gspca_dev)
else
comb = 0xa0;
if (sd->autogain)
- comb |= 0x02;
+ comb |= 0x03;
i2c_w1(&sd->gspca_dev, 0x13, comb);
return;
}
@@ -1585,12 +1610,15 @@ static void setvflip(struct sd *sd)
{
u8 comn;
- if (sd->sensor == SENSOR_OV7630)
+ if (sd->sensor == SENSOR_OV7630) {
comn = 0x02;
- else
+ if (!sd->vflip)
+ comn |= 0x80;
+ } else {
comn = 0x06;
- if (sd->vflip)
- comn |= 0x80;
+ if (sd->vflip)
+ comn |= 0x80;
+ }
i2c_w1(&sd->gspca_dev, 0x75, comn);
}
@@ -1602,6 +1630,58 @@ static void setinfrared(struct sd *sd)
sd->infrared ? 0x66 : 0x64);
}
+static void setfreq(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->sensor == SENSOR_OV7660) {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w1(gspca_dev, 0x13, 0xdf);
+ break;
+ case 1: /* 50 hz */
+ i2c_w1(gspca_dev, 0x13, 0xff);
+ i2c_w1(gspca_dev, 0x3b, 0x0a);
+ break;
+ case 2: /* 60 hz */
+ i2c_w1(gspca_dev, 0x13, 0xff);
+ i2c_w1(gspca_dev, 0x3b, 0x02);
+ break;
+ }
+ } else {
+ u8 reg2a = 0, reg2b = 0, reg2d = 0;
+
+ /* Get reg2a / reg2d base values */
+ switch (sd->sensor) {
+ case SENSOR_OV7630:
+ reg2a = 0x08;
+ reg2d = 0x01;
+ break;
+ case SENSOR_OV7648:
+ reg2a = 0x11;
+ reg2d = 0x81;
+ break;
+ }
+
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ break;
+ case 1: /* 50 hz (filter on and framerate adj) */
+ reg2a |= 0x80;
+ reg2b = 0xac;
+ reg2d |= 0x04;
+ break;
+ case 2: /* 60 hz (filter on, no framerate adj) */
+ reg2a |= 0x80;
+ reg2d |= 0x04;
+ break;
+ }
+ i2c_w1(gspca_dev, 0x2a, reg2a);
+ i2c_w1(gspca_dev, 0x2b, reg2b);
+ i2c_w1(gspca_dev, 0x2d, reg2d);
+ }
+}
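
For instance, with the composition above an OV7630 at 50 Hz ends up with reg2a = 0x08 | 0x80 = 0x88 (the same 0x2a value the old ov7630 init table set unconditionally), reg2b = 0xac and reg2d = 0x01 | 0x04 = 0x05; for the OV7648 the base value already carries 0x81, so reg2d becomes 0x85, matching the line this patch comments out of ov7648_sensor_init.
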
+
static void setjpegqual(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1828,6 +1908,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setautogain(gspca_dev);
+ setfreq(gspca_dev);
return 0;
}
@@ -2131,6 +2212,24 @@ static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freq = val;
+ if (gspca_dev->streaming)
+ setfreq(gspca_dev);
+ return 0;
+}
+
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freq;
+ return 0;
+}
+
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
@@ -2159,6 +2258,27 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
return 0;
}
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -2173,6 +2293,7 @@ static const struct sd_desc sd_desc = {
.dq_callback = do_autogain,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
+ .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -2233,7 +2354,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)},
#endif
{USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)},
-/* {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x??)}, */
+ {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x21)},
{USB_DEVICE(0x0c45, 0x6143), BSI(SN9C120, SP80708, 0x18)},
{}
};
diff --git a/drivers/media/video/gspca/stv06xx/Makefile b/drivers/media/video/gspca/stv06xx/Makefile
index feeaa94ab588..2f3c3a606ce4 100644
--- a/drivers/media/video/gspca/stv06xx/Makefile
+++ b/drivers/media/video/gspca/stv06xx/Makefile
@@ -3,7 +3,8 @@ obj-$(CONFIG_USB_STV06XX) += gspca_stv06xx.o
gspca_stv06xx-objs := stv06xx.o \
stv06xx_vv6410.o \
stv06xx_hdcs.o \
- stv06xx_pb0100.o
+ stv06xx_pb0100.o \
+ stv06xx_st6422.o
EXTRA_CFLAGS += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index e573c3406324..0da8e0de0456 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -92,11 +92,10 @@ static int stv06xx_write_sensor_finish(struct sd *sd)
{
int err = 0;
- if (IS_850(sd)) {
+ if (sd->bridge == BRIDGE_STV610) {
struct usb_device *udev = sd->gspca_dev.dev;
__u8 *buf = sd->gspca_dev.usb_buf;
- /* Quickam Web needs an extra packet */
buf[0] = 0;
err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x04, 0x40, 0x1704, 0, buf, 1,
@@ -253,7 +252,7 @@ static int stv06xx_init(struct gspca_dev *gspca_dev)
err = sd->sensor->init(sd);
- if (dump_sensor)
+ if (dump_sensor && sd->sensor->dump)
sd->sensor->dump(sd);
return (err < 0) ? err : 0;
@@ -318,6 +317,8 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
__u8 *data, /* isoc packet */
int len) /* iso packet length */
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
PDEBUG(D_PACK, "Packet of length %d arrived", len);
/* A packet may contain several frames
@@ -343,14 +344,29 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
if (len < chunk_len) {
PDEBUG(D_ERR, "URB packet length is smaller"
" than the specified chunk length");
+ gspca_dev->last_packet_type = DISCARD_PACKET;
return;
}
+ /* First byte seems to be 02=data, 2nd byte is unknown??? */
+ if (sd->bridge == BRIDGE_ST6422 && (id & 0xFF00) == 0x0200)
+ goto frame_data;
+
switch (id) {
case 0x0200:
case 0x4200:
+frame_data:
PDEBUG(D_PACK, "Frame data packet detected");
+ if (sd->to_skip) {
+ int skip = (sd->to_skip < chunk_len) ?
+ sd->to_skip : chunk_len;
+ data += skip;
+ len -= skip;
+ chunk_len -= skip;
+ sd->to_skip -= skip;
+ }
+
gspca_frame_add(gspca_dev, INTER_PACKET, frame,
data, chunk_len);
break;
@@ -365,6 +381,9 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, FIRST_PACKET,
frame, data, 0);
+ if (sd->bridge == BRIDGE_ST6422)
+ sd->to_skip = gspca_dev->width * 4;
+
if (chunk_len)
PDEBUG(D_ERR, "Chunk length is "
"non-zero on a SOF");
@@ -395,8 +414,12 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
/* Unknown chunk with 2 bytes of data,
occurs 2-3 times per USB interrupt */
break;
+ case 0x42ff:
+ PDEBUG(D_PACK, "Chunk 0x42ff detected");
+ /* Special chunk seen sometimes on the ST6422 */
+ break;
default:
- PDEBUG(D_PACK, "Unknown chunk %d detected", id);
+ PDEBUG(D_PACK, "Unknown chunk 0x%04x detected", id);
/* Unknown chunk */
}
data += chunk_len;
@@ -428,11 +451,16 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
sd->desc = sd_desc;
+ sd->bridge = id->driver_info;
gspca_dev->sd_desc = &sd->desc;
if (dump_bridge)
stv06xx_dump_bridge(sd);
+ sd->sensor = &stv06xx_sensor_st6422;
+ if (!sd->sensor->probe(sd))
+ return 0;
+
sd->sensor = &stv06xx_sensor_vv6410;
if (!sd->sensor->probe(sd))
return 0;
@@ -457,9 +485,20 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
/* -- module initialisation -- */
static const __devinitdata struct usb_device_id device_table[] = {
- {USB_DEVICE(0x046d, 0x0840)}, /* QuickCam Express */
- {USB_DEVICE(0x046d, 0x0850)}, /* LEGO cam / QuickCam Web */
- {USB_DEVICE(0x046d, 0x0870)}, /* Dexxa WebCam USB */
+ /* QuickCam Express */
+ {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
+ /* LEGO cam / QuickCam Web */
+ {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },
+ /* Dexxa WebCam USB */
+ {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },
+ /* QuickCam Messenger */
+ {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Communicate */
+ {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Messenger (new) */
+ {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Messenger (new) */
+ {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 1207e7d17f14..992ce530f138 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -36,10 +36,6 @@
#define STV_ISOC_ENDPOINT_ADDR 0x81
-#ifndef V4L2_PIX_FMT_SGRBG8
-#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G')
-#endif
-
#define STV_REG23 0x0423
/* Control registers of the STV0600 ASIC */
@@ -93,6 +89,17 @@ struct sd {
/* Sensor private data */
void *sensor_priv;
+
+ /* The first 4 lines produced by the st6422 are no good; this keeps
+ track of how many bytes we still need to skip during a frame */
+ int to_skip;
+
+ /* Bridge / Camera type */
+ u8 bridge;
+ #define BRIDGE_STV600 0
+ #define BRIDGE_STV602 1
+ #define BRIDGE_STV610 2
+ #define BRIDGE_ST6422 3 /* With integrated sensor */
};
int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index b16903814203..3039ec208f3a 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -434,7 +434,7 @@ static int hdcs_probe_1x00(struct sd *sd)
hdcs->exp.er = 100;
/*
- * Frame rate on HDCS-1000 0x46D:0x840 depends on PSMP:
+ * Frame rate on HDCS-1000 with STV600 depends on PSMP:
* 4 = doesn't work at all
* 5 = 7.8 fps,
* 6 = 6.9 fps,
@@ -443,7 +443,7 @@ static int hdcs_probe_1x00(struct sd *sd)
* 15 = 4.4 fps,
* 31 = 2.8 fps
*
- * Frame rate on HDCS-1000 0x46D:0x870 depends on PSMP:
+ * Frame rate on HDCS-1000 with STV602 depends on PSMP:
* 15 = doesn't work at all
* 18 = doesn't work at all
* 19 = 7.3 fps
@@ -453,7 +453,7 @@ static int hdcs_probe_1x00(struct sd *sd)
* 24 = 6.3 fps
* 30 = 5.4 fps
*/
- hdcs->psmp = IS_870(sd) ? 20 : 5;
+ hdcs->psmp = (sd->bridge == BRIDGE_STV602) ? 20 : 5;
sd->sensor_priv = hdcs;
@@ -530,7 +530,7 @@ static int hdcs_init(struct sd *sd)
int i, err = 0;
/* Set the STV0602AA in STV0600 emulation mode */
- if (IS_870(sd))
+ if (sd->bridge == BRIDGE_STV602)
stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1);
/* Execute the bridge init */
@@ -558,7 +558,7 @@ static int hdcs_init(struct sd *sd)
return err;
/* Set PGA sample duration
- (was 0x7E for IS_870, but caused slow framerate with HDCS-1020) */
+ (was 0x7E for the STV602, but caused slow framerate with HDCS-1020) */
if (IS_1020(sd))
err = stv06xx_write_sensor(sd, HDCS_TCTRL,
(HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
index e88c42f7d2f8..934b9cebc1ab 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
@@ -32,14 +32,13 @@
#include "stv06xx.h"
-#define IS_850(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x850)
-#define IS_870(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x870)
#define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020)
extern const struct stv06xx_sensor stv06xx_sensor_vv6410;
extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00;
extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020;
extern const struct stv06xx_sensor stv06xx_sensor_pb0100;
+extern const struct stv06xx_sensor stv06xx_sensor_st6422;
struct stv06xx_sensor {
/* Defines the name of a sensor */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
new file mode 100644
index 000000000000..87cb5b9ddfa7
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -0,0 +1,453 @@
+/*
+ * Support for the sensor part which is integrated (I think) into the
+ * st6422 stv06xx alike bridge, as its integrated there are no i2c writes
+ * but instead direct bridge writes.
+ *
+ * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Strongly based on qc-usb-messenger, which is:
+ * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
+ * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
+ * Copyright (c) 2002, 2003 Tuukka Toivonen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "stv06xx_st6422.h"
+
+static struct v4l2_pix_format st6422_mode[] = {
+ /* Note we actually get 124 lines of data, of which we skip the first
+ 4 as they are garbage */
+ {
+ 162,
+ 120,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .sizeimage = 162 * 120,
+ .bytesperline = 162,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 1
+ },
+ /* Note we actually get 248 lines of data, of which we skip the first
+ 4 as they are garbage, and we tell the app it only gets the
+ first 240 of the 244 lines it actually gets, so that it ignores
+ the last 4. */
+ {
+ 324,
+ 240,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .sizeimage = 324 * 244,
+ .bytesperline = 324,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0
+ },
+};
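
A quick sanity check of the numbers in the larger mode: the bridge delivers 324 x 248 = 80352 bytes per frame (matching the "324x248 80352 bytes" note in the init table below); the to_skip logic in stv06xx_pkt_scan() drops the first 4 lines, i.e. 324 * 4 = 1296 bytes, so sizeimage is set to 324 * 244 = 79056, and the application is told a height of 240 so it simply ignores the last 4 of those 244 lines.
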
+
+static const struct ctrl st6422_ctrl[] = {
+#define BRIGHTNESS_IDX 0
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 3
+ },
+ .set = st6422_set_brightness,
+ .get = st6422_get_brightness
+ },
+#define CONTRAST_IDX 1
+ {
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 15,
+ .step = 1,
+ .default_value = 11
+ },
+ .set = st6422_set_contrast,
+ .get = st6422_get_contrast
+ },
+#define GAIN_IDX 2
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64
+ },
+ .set = st6422_set_gain,
+ .get = st6422_get_gain
+ },
+#define EXPOSURE_IDX 3
+ {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 1023,
+ .step = 1,
+ .default_value = 256
+ },
+ .set = st6422_set_exposure,
+ .get = st6422_get_exposure
+ },
+};
+
+static int st6422_probe(struct sd *sd)
+{
+ int i;
+ s32 *sensor_settings;
+
+ if (sd->bridge != BRIDGE_ST6422)
+ return -ENODEV;
+
+ info("st6422 sensor detected");
+
+ sensor_settings = kmalloc(ARRAY_SIZE(st6422_ctrl) * sizeof(s32),
+ GFP_KERNEL);
+ if (!sensor_settings)
+ return -ENOMEM;
+
+ sd->gspca_dev.cam.cam_mode = st6422_mode;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode);
+ sd->desc.ctrls = st6422_ctrl;
+ sd->desc.nctrls = ARRAY_SIZE(st6422_ctrl);
+ sd->sensor_priv = sensor_settings;
+
+ for (i = 0; i < sd->desc.nctrls; i++)
+ sensor_settings[i] = st6422_ctrl[i].qctrl.default_value;
+
+ return 0;
+}
+
+static int st6422_init(struct sd *sd)
+{
+ int err = 0, i;
+
+ const u16 st6422_bridge_init[][2] = {
+ { STV_ISO_ENABLE, 0x00 }, /* disable capture */
+ { 0x1436, 0x00 },
+ { 0x1432, 0x03 }, /* 0x00-0x1F brightness */
+ { 0x143a, 0xF9 }, /* 0x00-0x0F contrast */
+ { 0x0509, 0x38 }, /* R */
+ { 0x050a, 0x38 }, /* G */
+ { 0x050b, 0x38 }, /* B */
+ { 0x050c, 0x2A },
+ { 0x050d, 0x01 },
+
+
+ { 0x1431, 0x00 }, /* 0x00-0x07 ??? */
+ { 0x1433, 0x34 }, /* 160x120, 0x00-0x01 night filter */
+ { 0x1438, 0x18 }, /* 640x480 */
+/* 18 bayes */
+/* 10 compressed? */
+
+ { 0x1439, 0x00 },
+/* anti-flicker?? 0xa2 gives a perfect picture against a monitor */
+
+ { 0x143b, 0x05 },
+ { 0x143c, 0x00 }, /* 0x00-0x01 - ??? */
+
+
+/* shutter time 0x0000-0x03FF */
+/* low values give good pictures of moving objects (but require much light) */
+/* high values give good pictures in darkness (but tend to be overexposed) */
+ { 0x143e, 0x01 },
+ { 0x143d, 0x00 },
+
+ { 0x1442, 0xe2 },
+/* write: 1x1x xxxx */
+/* read: 1x1x xxxx */
+/* bit 5 == button pressed and hold if 0 */
+/* write 0xe2,0xea */
+
+/* 0x144a */
+/* 0x00 init */
+/* bit 7 == button has been pressed, but not handled */
+
+/* interrupt */
+/* if(urb->iso_frame_desc[i].status == 0x80) { */
+/* if(urb->iso_frame_desc[i].status == 0x88) { */
+
+ { 0x1500, 0xd0 },
+ { 0x1500, 0xd0 },
+ { 0x1500, 0x50 }, /* 0x00 - 0xFF 0x80 == compr ? */
+
+ { 0x1501, 0xaf },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become lighter. */
+ { 0x1502, 0xc2 },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become lighter. */
+ { 0x1503, 0x45 },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become lighter. */
+
+ { 0x1505, 0x02 },
+/* 2 : 324x248 80352 bytes */
+/* 7 : 248x162 40176 bytes */
+/* c+f: 162*124 20088 bytes */
+
+ { 0x150e, 0x8e },
+ { 0x150f, 0x37 },
+ { 0x15c0, 0x00 },
+ { 0x15c1, 1023 }, /* 160x120, ISOC_PACKET_SIZE */
+ { 0x15c3, 0x08 }, /* 0x04/0x14 ... test pictures ??? */
+
+
+ { 0x143f, 0x01 }, /* commit settings */
+
+ };
+
+ for (i = 0; i < ARRAY_SIZE(st6422_bridge_init) && !err; i++) {
+ err = stv06xx_write_bridge(sd, st6422_bridge_init[i][0],
+ st6422_bridge_init[i][1]);
+ }
+
+ return err;
+}
+
+static void st6422_disconnect(struct sd *sd)
+{
+ sd->sensor = NULL;
+ kfree(sd->sensor_priv);
+}
+
+static int st6422_start(struct sd *sd)
+{
+ int err, packet_size;
+ struct cam *cam = &sd->gspca_dev.cam;
+ s32 *sensor_settings = sd->sensor_priv;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ err = stv06xx_write_bridge(sd, 0x15c1, packet_size);
+ if (err < 0)
+ return err;
+
+ if (cam->cam_mode[sd->gspca_dev.curr_mode].priv)
+ err = stv06xx_write_bridge(sd, 0x1505, 0x0f);
+ else
+ err = stv06xx_write_bridge(sd, 0x1505, 0x02);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_brightness(&sd->gspca_dev,
+ sensor_settings[BRIGHTNESS_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_contrast(&sd->gspca_dev,
+ sensor_settings[CONTRAST_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_gain(&sd->gspca_dev,
+ sensor_settings[GAIN_IDX]);
+ if (err < 0)
+ return err;
+
+ PDEBUG(D_STREAM, "Starting stream");
+
+ return 0;
+}
+
+static int st6422_stop(struct sd *sd)
+{
+ PDEBUG(D_STREAM, "Halting stream");
+
+ return 0;
+}
+
+static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[BRIGHTNESS_IDX];
+
+ PDEBUG(D_V4L2, "Read brightness %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[BRIGHTNESS_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ /* val goes from 0 -> 31 */
+ PDEBUG(D_V4L2, "Set brightness to %d", val);
+ err = stv06xx_write_bridge(sd, 0x1432, val);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[CONTRAST_IDX];
+
+ PDEBUG(D_V4L2, "Read contrast %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[CONTRAST_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ /* Val goes from 0 -> 15 */
+ PDEBUG(D_V4L2, "Set contrast to %d\n", val);
+ err = stv06xx_write_bridge(sd, 0x143a, 0xf0 | val);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[GAIN_IDX];
+
+ PDEBUG(D_V4L2, "Read gain %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[GAIN_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ PDEBUG(D_V4L2, "Set gain to %d", val);
+
+ /* Set red, green, blue, gain */
+ err = stv06xx_write_bridge(sd, 0x0509, val);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050a, val);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050b, val);
+ if (err < 0)
+ return err;
+
+ /* 2 mystery writes */
+ err = stv06xx_write_bridge(sd, 0x050c, 0x2a);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050d, 0x01);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[EXPOSURE_IDX];
+
+ PDEBUG(D_V4L2, "Read exposure %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[EXPOSURE_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ PDEBUG(D_V4L2, "Set exposure to %d\n", val);
+ err = stv06xx_write_bridge(sd, 0x143d, val & 0xff);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x143e, val >> 8);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
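
The control setters above all follow the same sequence: cache the new value in sensor_settings, skip the hardware access unless the device is streaming, write the control register, and finally commit by writing 0x01 to bridge register 0x143f. As a sketch only (not part of the patch; the u16 parameter types are assumed from how stv06xx_write_bridge is called above), a hypothetical helper factoring out the write-then-commit step could look like this:

/*
 * Hypothetical helper, illustrative only: write one control register and
 * commit the change via bridge register 0x143f, as the setters above do.
 * Parameter types are assumed from how stv06xx_write_bridge is called here.
 */
static int st6422_write_and_commit(struct sd *sd, u16 addr, u16 val)
{
	int err;

	err = stv06xx_write_bridge(sd, addr, val);
	if (err < 0)
		return err;

	/* commit settings */
	err = stv06xx_write_bridge(sd, 0x143f, 0x01);
	return (err < 0) ? err : 0;
}

With such a helper, st6422_set_brightness() would reduce to a single st6422_write_and_commit(sd, 0x1432, val) call after the streaming check.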
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
new file mode 100644
index 000000000000..b2d45fe50522
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
@@ -0,0 +1,59 @@
+/*
+ * Support for the sensor part which is integrated (I think) into the
+ * st6422 stv06xx-like bridge; as it is integrated there, there are no i2c
+ * writes, only direct bridge writes.
+ *
+ * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Strongly based on qc-usb-messenger, which is:
+ * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
+ * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
+ * Copyright (c) 2002, 2003 Tuukka Toivonen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef STV06XX_ST6422_H_
+#define STV06XX_ST6422_H_
+
+#include "stv06xx_sensor.h"
+
+static int st6422_probe(struct sd *sd);
+static int st6422_start(struct sd *sd);
+static int st6422_init(struct sd *sd);
+static int st6422_stop(struct sd *sd);
+static void st6422_disconnect(struct sd *sd);
+
+/* V4L2 controls supported by the driver */
+static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
+
+const struct stv06xx_sensor stv06xx_sensor_st6422 = {
+ .name = "ST6422",
+ .init = st6422_init,
+ .probe = st6422_probe,
+ .start = st6422_start,
+ .stop = st6422_stop,
+ .disconnect = st6422_disconnect,
+};
+
+#endif
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index 84995bcf4a75..a3b77ed3f089 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -60,6 +60,8 @@ int ivtv_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qctrl)
switch (qctrl->id) {
/* Standard V4L2 controls */
+ case V4L2_CID_USER_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_HUE:
case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 459c04cbf69d..4d794b42d6cd 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -280,15 +280,9 @@ static int mt9m001_try_fmt(struct soc_camera_device *icd,
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < 32 + icd->y_skip_top)
- pix->height = 32 + icd->y_skip_top;
- if (pix->height > 1024 + icd->y_skip_top)
- pix->height = 1024 + icd->y_skip_top;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 1280)
- pix->width = 1280;
- pix->width &= ~0x01; /* has to be even, unsure why was ~3 */
+ v4l_bound_align_image(&pix->width, 48, 1280, 1,
+ &pix->height, 32 + icd->y_skip_top,
+ 1024 + icd->y_skip_top, 0, 0);
return 0;
}
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index f72aeb7c4deb..4207fb342670 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -385,17 +385,9 @@ static int mt9t031_try_fmt(struct soc_camera_device *icd,
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < MT9T031_MIN_HEIGHT)
- pix->height = MT9T031_MIN_HEIGHT;
- if (pix->height > MT9T031_MAX_HEIGHT)
- pix->height = MT9T031_MAX_HEIGHT;
- if (pix->width < MT9T031_MIN_WIDTH)
- pix->width = MT9T031_MIN_WIDTH;
- if (pix->width > MT9T031_MAX_WIDTH)
- pix->width = MT9T031_MAX_WIDTH;
-
- pix->width &= ~0x01; /* has to be even */
- pix->height &= ~0x01; /* has to be even */
+ v4l_bound_align_image(
+ &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
+ &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
return 0;
}
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
new file mode 100644
index 000000000000..1fe8fc9183a7
--- /dev/null
+++ b/drivers/media/video/mt9v011.c
@@ -0,0 +1,431 @@
+/*
+ * mt9v011 - Micron 1/4-Inch VGA Digital Image Sensor
+ *
+ * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ * This code is placed under the terms of the GNU General Public License v2
+ */
+
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <media/v4l2-device.h>
+#include "mt9v011.h"
+#include <media/v4l2-i2c-drv.h>
+#include <media/v4l2-chip-ident.h>
+
+MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_LICENSE("GPL");
+
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* supported controls */
+static struct v4l2_queryctrl mt9v011_qctrl[] = {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = (1 << 10) - 1,
+ .step = 1,
+ .default_value = 0x0020,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = -1 << 9,
+ .maximum = (1 << 9) - 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = -1 << 9,
+ .maximum = (1 << 9) - 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ },
+};
+
+struct mt9v011 {
+ struct v4l2_subdev sd;
+ unsigned width, height;
+
+ u16 global_gain, red_bal, blue_bal;
+};
+
+static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mt9v011, sd);
+}
+
+static int mt9v011_read(struct v4l2_subdev *sd, unsigned char addr)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ __be16 buffer;
+ int rc, val;
+
+ rc = i2c_master_send(c, &addr, 1);
+ if (rc != 1)
+ v4l2_dbg(0, debug, sd,
+ "i2c i/o error: rc == %d (should be 1)\n", rc);
+
+ msleep(10);
+
+ rc = i2c_master_recv(c, (char *)&buffer, 2);
+ if (rc != 2)
+ v4l2_dbg(0, debug, sd,
+ "i2c i/o error: rc == %d (should be 2)\n", rc);
+
+ val = be16_to_cpu(buffer);
+
+ v4l2_dbg(2, debug, sd, "mt9v011: read 0x%02x = 0x%04x\n", addr, val);
+
+ return val;
+}
+
+static void mt9v011_write(struct v4l2_subdev *sd, unsigned char addr,
+ u16 value)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ unsigned char buffer[3];
+ int rc;
+
+ buffer[0] = addr;
+ buffer[1] = value >> 8;
+ buffer[2] = value & 0xff;
+
+ v4l2_dbg(2, debug, sd,
+ "mt9v011: writing 0x%02x 0x%04x\n", buffer[0], value);
+ rc = i2c_master_send(c, buffer, 3);
+ if (rc != 3)
+ v4l2_dbg(0, debug, sd,
+ "i2c i/o error: rc == %d (should be 3)\n", rc);
+}
+
+
+struct i2c_reg_value {
+ unsigned char reg;
+ u16 value;
+};
+
+/*
+ * Values used by the original driver.
+ * Some values are marked as reserved in the datasheet.
+ */
+static const struct i2c_reg_value mt9v011_init_default[] = {
+ { R0D_MT9V011_RESET, 0x0001 },
+ { R0D_MT9V011_RESET, 0x0000 },
+
+ { R0C_MT9V011_SHUTTER_DELAY, 0x0000 },
+ { R09_MT9V011_SHUTTER_WIDTH, 0x1fc },
+
+ { R0A_MT9V011_CLK_SPEED, 0x0000 },
+ { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 },
+ { R20_MT9V011_READ_MODE, 0x1000 },
+
+ { R07_MT9V011_OUT_CTRL, 0x000a }, /* chip enable */
+};
+
+static void set_balance(struct v4l2_subdev *sd)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ u16 green1_gain, green2_gain, blue_gain, red_gain;
+
+ green1_gain = core->global_gain;
+ green2_gain = core->global_gain;
+
+ blue_gain = core->global_gain +
+ core->global_gain * core->blue_bal / (1 << 9);
+
+ red_gain = core->global_gain +
+ core->global_gain * core->red_bal / (1 << 9);
+
+ mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green1_gain);
+ mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green2_gain);
+ mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
+ mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
+}
+
+static void set_res(struct v4l2_subdev *sd)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ unsigned vstart, hstart;
+
+ /*
+ * The mt9v011 doesn't have scaling, so in order to select the desired
+ * resolution we crop at the middle of the sensor.
+ * hblank and vblank are adjusted to guarantee that the line timings
+ * for 30 fps are preserved, no matter what resolution is selected.
+ * NOTE: the datasheet says that width (and height) should be filled with
+ * width-1. However, this doesn't work, since one pixel per line would
+ * be missing.
+ */
+
+ hstart = 14 + (640 - core->width) / 2;
+ mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
+ mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
+ mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);
+
+ vstart = 8 + (640 - core->height) / 2;
+ mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart);
+ mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height);
+ mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height);
+}
+
+static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt9v011_init_default); i++)
+ mt9v011_write(sd, mt9v011_init_default[i].reg,
+ mt9v011_init_default[i].value);
+
+ set_balance(sd);
+ set_res(sd);
+
+ return 0;
+}
+
+static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+
+ v4l2_dbg(1, debug, sd, "g_ctrl called\n");
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ctrl->value = core->global_gain;
+ return 0;
+ case V4L2_CID_RED_BALANCE:
+ ctrl->value = core->red_bal;
+ return 0;
+ case V4L2_CID_BLUE_BALANCE:
+ ctrl->value = core->blue_bal;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ u8 i, n;
+ n = ARRAY_SIZE(mt9v011_qctrl);
+
+ for (i = 0; i < n; i++) {
+ if (ctrl->id != mt9v011_qctrl[i].id)
+ continue;
+ if (ctrl->value < mt9v011_qctrl[i].minimum ||
+ ctrl->value > mt9v011_qctrl[i].maximum)
+ return -ERANGE;
+ v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n",
+ ctrl->id, ctrl->value);
+ break;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ core->global_gain = ctrl->value;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ core->red_bal = ctrl->value;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ core->blue_bal = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set_balance(sd);
+
+ return 0;
+}
+
+static int mt9v011_enum_fmt(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index > 0)
+ return -EINVAL;
+
+ fmt->flags = 0;
+ strcpy(fmt->description, "8 bpp Bayer GRGR..BGBG");
+ fmt->pixelformat = V4L2_PIX_FMT_SGRBG8;
+
+ return 0;
+}
+
+static int mt9v011_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->pixelformat != V4L2_PIX_FMT_SGRBG8)
+ return -EINVAL;
+
+ v4l_bound_align_image(&pix->width, 48, 639, 1,
+ &pix->height, 32, 480, 1, 0);
+
+ return 0;
+}
+
+static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct mt9v011 *core = to_mt9v011(sd);
+ int rc;
+
+ rc = mt9v011_try_fmt(sd, fmt);
+ if (rc < 0)
+ return -EINVAL;
+
+ core->width = pix->width;
+ core->height = pix->height;
+
+ set_res(sd);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mt9v011_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!v4l2_chip_match_i2c_client(client, &reg->match))
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ reg->val = mt9v011_read(sd, reg->reg & 0xff);
+ reg->size = 2;
+
+ return 0;
+}
+
+static int mt9v011_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!v4l2_chip_match_i2c_client(client, &reg->match))
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mt9v011_write(sd, reg->reg & 0xff, reg->val & 0xffff);
+
+ return 0;
+}
+#endif
+
+static int mt9v011_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011,
+ MT9V011_VERSION);
+}
+
+static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
+ .g_ctrl = mt9v011_g_ctrl,
+ .s_ctrl = mt9v011_s_ctrl,
+ .reset = mt9v011_reset,
+ .g_chip_ident = mt9v011_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = mt9v011_g_register,
+ .s_register = mt9v011_s_register,
+#endif
+};
+
+static const struct v4l2_subdev_video_ops mt9v011_video_ops = {
+ .enum_fmt = mt9v011_enum_fmt,
+ .try_fmt = mt9v011_try_fmt,
+ .s_fmt = mt9v011_s_fmt,
+};
+
+static const struct v4l2_subdev_ops mt9v011_ops = {
+ .core = &mt9v011_core_ops,
+ .video = &mt9v011_video_ops,
+};
+
+
+/****************************************************************************
+ I2C Client & Driver
+ ****************************************************************************/
+
+static int mt9v011_probe(struct i2c_client *c,
+ const struct i2c_device_id *id)
+{
+ u16 version;
+ struct mt9v011 *core;
+ struct v4l2_subdev *sd;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(c->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -EIO;
+
+ core = kzalloc(sizeof(struct mt9v011), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ sd = &core->sd;
+ v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
+
+ /* Check if the sensor is really a MT9V011 */
+ version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
+ if (version != MT9V011_VERSION) {
+ v4l2_info(sd, "*** unknown micron chip detected (0x%04x).\n",
+ version);
+ kfree(core);
+ return -EINVAL;
+ }
+
+ core->global_gain = 0x0024;
+ core->width = 640;
+ core->height = 480;
+
+ v4l_info(c, "chip found @ 0x%02x (%s)\n",
+ c->addr << 1, c->adapter->name);
+
+ return 0;
+}
+
+static int mt9v011_remove(struct i2c_client *c)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(c);
+
+ v4l2_dbg(1, debug, sd,
+ "mt9v011.c: removing mt9v011 adapter on address 0x%x\n",
+ c->addr << 1);
+
+ v4l2_device_unregister_subdev(sd);
+ kfree(to_mt9v011(sd));
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static const struct i2c_device_id mt9v011_id[] = {
+ { "mt9v011", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mt9v011_id);
+
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+ .name = "mt9v011",
+ .probe = mt9v011_probe,
+ .remove = mt9v011_remove,
+ .id_table = mt9v011_id,
+};
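
The register values programmed by set_balance() and set_res() are plain integer arithmetic, so they can be verified outside the kernel. A minimal user-space sketch (not part of the patch; the settings below are hypothetical examples) reproduces the calculations exactly as written above:

#include <stdio.h>

int main(void)
{
	/* hypothetical example settings */
	unsigned int global_gain = 0x20, blue_bal = 64;
	unsigned int width = 320, height = 240;

	/* same arithmetic as set_balance() and set_res() above */
	unsigned int blue_gain = global_gain + global_gain * blue_bal / (1 << 9);
	unsigned int hstart = 14 + (640 - width) / 2;
	unsigned int vstart = 8 + (640 - height) / 2;

	printf("blue_gain = %u\n", blue_gain);                        /* 36 */
	printf("colstart = %u, hblank = %u\n", hstart, 771 - width);  /* 174, 451 */
	printf("rowstart = %u, vblank = %u\n", vstart, 508 - height); /* 208, 268 */
	return 0;
}

For the 320x240 example this gives a column start of 174, an hblank of 451, a row start of 208 and a vblank of 268, matching the "preserve the line timings" intent described in the set_res() comment.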
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
new file mode 100644
index 000000000000..9e443ee30558
--- /dev/null
+++ b/drivers/media/video/mt9v011.h
@@ -0,0 +1,35 @@
+/*
+ * mt9v011 - Micron 1/4-Inch VGA Digital Image Sensor
+ *
+ * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ * This code is placed under the terms of the GNU General Public License v2
+ */
+
+#ifndef MT9V011_H_
+#define MT9V011_H_
+
+#define R00_MT9V011_CHIP_VERSION 0x00
+#define R01_MT9V011_ROWSTART 0x01
+#define R02_MT9V011_COLSTART 0x02
+#define R03_MT9V011_HEIGHT 0x03
+#define R04_MT9V011_WIDTH 0x04
+#define R05_MT9V011_HBLANK 0x05
+#define R06_MT9V011_VBLANK 0x06
+#define R07_MT9V011_OUT_CTRL 0x07
+#define R09_MT9V011_SHUTTER_WIDTH 0x09
+#define R0A_MT9V011_CLK_SPEED 0x0a
+#define R0B_MT9V011_RESTART 0x0b
+#define R0C_MT9V011_SHUTTER_DELAY 0x0c
+#define R0D_MT9V011_RESET 0x0d
+#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
+#define R20_MT9V011_READ_MODE 0x20
+#define R2B_MT9V011_GREEN_1_GAIN 0x2b
+#define R2C_MT9V011_BLUE_GAIN 0x2c
+#define R2D_MT9V011_RED_GAIN 0x2d
+#define R2E_MT9V011_GREEN_2_GAIN 0x2e
+#define R35_MT9V011_GLOBAL_GAIN 0x35
+#define RF1_MT9V011_CHIP_ENABLE 0xf1
+
+#define MT9V011_VERSION 0x8243
+
+#endif
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index be20d312b1dc..dbdcc86ae50d 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -364,15 +364,9 @@ static int mt9v022_try_fmt(struct soc_camera_device *icd,
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < 32 + icd->y_skip_top)
- pix->height = 32 + icd->y_skip_top;
- if (pix->height > 480 + icd->y_skip_top)
- pix->height = 480 + icd->y_skip_top;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 752)
- pix->width = 752;
- pix->width &= ~0x03; /* ? */
+ v4l_bound_align_image(&pix->width, 48, 752, 2 /* ? */,
+ &pix->height, 32 + icd->y_skip_top,
+ 480 + icd->y_skip_top, 0, 0);
return 0;
}
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 08cfd3e4ae8a..0bc2cf573c76 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -211,8 +211,6 @@ static const int i2c_detect_tries = 5;
static struct usb_device_id device_table [] = {
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511) },
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) },
- { USB_DEVICE(VEND_OMNIVISION, PROD_OV518) },
- { USB_DEVICE(VEND_OMNIVISION, PROD_OV518PLUS) },
{ USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) },
{ } /* Terminating entry */
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
index 10ef1a2c13ea..416933ca607d 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -48,11 +48,13 @@ static const int routing_scheme0[] = {
MSP_DSP_IN_SCART),
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
};
void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -65,7 +67,7 @@ void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo");
if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
+ ((sp = routing_schemes[sid]) != NULL) &&
(hdw->input_val >= 0) &&
(hdw->input_val < sp->cnt)) {
input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
index 9023adf3fdcc..68980e19409f 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
@@ -49,11 +49,13 @@ static const int routing_scheme1[] = {
[PVR2_CVAL_INPUT_SVIDEO] = 0,
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_ONAIR] = {
- .def = routing_scheme1,
- .cnt = ARRAY_SIZE(routing_scheme1),
- },
+static const struct routing_scheme routing_def1 = {
+ .def = routing_scheme1,
+ .cnt = ARRAY_SIZE(routing_scheme1),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
};
@@ -65,12 +67,11 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
u32 input;
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
hdw->input_val);
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- input = sp->def[hdw->input_val];
- } else {
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev v4l2 set_input:"
" Invalid routing scheme (%u)"
@@ -78,6 +79,7 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
sid, hdw->input_val);
return;
}
+ input = sp->def[hdw->input_val];
sd->ops->audio->s_routing(sd, input, 0, 0);
}
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
index 05e52358ae49..82c135835753 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -68,6 +68,11 @@ static const struct routing_scheme_item routing_scheme0[] = {
},
};
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
/* Specific to gotview device */
static const struct routing_scheme_item routing_schemegv[] = {
[PVR2_CVAL_INPUT_TV] = {
@@ -90,15 +95,14 @@ static const struct routing_scheme_item routing_schemegv[] = {
},
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
- [PVR2_ROUTING_SCHEME_GOTVIEW] = {
- .def = routing_schemegv,
- .cnt = ARRAY_SIZE(routing_schemegv),
- },
+static const struct routing_scheme routing_defgv = {
+ .def = routing_schemegv,
+ .cnt = ARRAY_SIZE(routing_schemegv),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
+ [PVR2_ROUTING_SCHEME_GOTVIEW] = &routing_defgv,
};
void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -110,13 +114,11 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
const struct routing_scheme *sp;
unsigned int sid = hdw->hdw_desc->signal_routing_scheme;
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- vid_input = sp->def[hdw->input_val].vid;
- aud_input = sp->def[hdw->input_val].aud;
- } else {
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev cx2584x set_input:"
" Invalid routing scheme (%u)"
@@ -124,7 +126,8 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
sid, hdw->input_val);
return;
}
-
+ vid_input = sp->def[hdw->input_val].vid;
+ aud_input = sp->def[hdw->input_val].aud;
pvr2_trace(PVR2_TRACE_CHIPS,
"subdev cx2584x set_input vid=0x%x aud=0x%x",
vid_input, aud_input);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 0c745b142fb7..cbc388729d77 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -85,8 +85,8 @@ MODULE_PARM_DESC(video_std,"specify initial video standard");
module_param_array(tolerance, int, NULL, 0444);
MODULE_PARM_DESC(tolerance,"specify stream error tolerance");
-/* US Broadcast channel 7 (175.25 MHz) */
-static int default_tv_freq = 175250000L;
+/* US Broadcast channel 3 (61.25 MHz), to help with testing */
+static int default_tv_freq = 61250000L;
/* 104.3 MHz, a usable FM station for my area */
static int default_radio_freq = 104300000L;
@@ -1987,6 +1987,34 @@ static unsigned int pvr2_copy_i2c_addr_list(
}
+static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
+{
+ /*
+ Mike Isely <isely@pobox.com> 19-Nov-2006 - This bit of nuttiness
+ for cx25840 causes that module to correctly set up its video
+ scaling. This is really a problem in the cx25840 module itself,
+ but we work around it here. The problem has not been seen in
+ ivtv because there VBI is supported and set up. We don't do VBI
+ here (at least not yet) and thus we never attempted to even set
+ it up.
+ */
+ struct v4l2_format fmt;
+ if (hdw->decoder_client_id != PVR2_CLIENT_ID_CX25840) {
+ /* We're not using a cx25840 so don't enable the hack */
+ return;
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Module ID %u:"
+ " Executing cx25840 VBI hack",
+ hdw->decoder_client_id);
+ memset(&fmt, 0, sizeof(fmt));
+ fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
+ video, s_fmt, &fmt);
+}
+
+
static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
const struct pvr2_device_client_desc *cd)
{
@@ -2078,30 +2106,6 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
/* client-specific setup... */
switch (mid) {
case PVR2_CLIENT_ID_CX25840:
- hdw->decoder_client_id = mid;
- {
- /*
- Mike Isely <isely@pobox.com> 19-Nov-2006 - This
- bit of nuttiness for cx25840 causes that module
- to correctly set up its video scaling. This is
- really a problem in the cx25840 module itself,
- but we work around it here. The problem has not
- been seen in ivtv because there VBI is supported
- and set up. We don't do VBI here (at least not
- yet) and thus we never attempted to even set it
- up.
- */
- struct v4l2_format fmt;
- pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Executing cx25840 VBI hack",
- mid);
- memset(&fmt, 0, sizeof(fmt));
- fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- v4l2_device_call_all(&hdw->v4l2_dev, mid,
- video, s_fmt, &fmt);
- }
- break;
case PVR2_CLIENT_ID_SAA7115:
hdw->decoder_client_id = mid;
break;
@@ -2202,6 +2206,8 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
cptr->info->set_value(cptr,~0,cptr->info->default_value);
}
+ pvr2_hdw_cx25840_vbi_hack(hdw);
+
/* Set up special default values for the television and radio
frequencies here. It's not really important what these defaults
are, but I set them to something usable in the Chicago area just
@@ -2954,6 +2960,7 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
vs = hdw->std_mask_cur;
v4l2_device_call_all(&hdw->v4l2_dev, 0,
core, s_std, vs);
+ pvr2_hdw_cx25840_vbi_hack(hdw);
}
hdw->tuner_signal_stale = !0;
hdw->cropcap_stale = !0;
@@ -4076,6 +4083,7 @@ int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw)
if (hdw->decoder_client_id) {
v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
core, reset, 0);
+ pvr2_hdw_cx25840_vbi_hack(hdw);
return 0;
}
pvr2_trace(PVR2_TRACE_INIT,
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
index d2fe7c8f2c3a..4c96cf48c796 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -54,6 +54,11 @@ static const int routing_scheme0[] = {
[PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2,
};
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
static const int routing_scheme1[] = {
[PVR2_CVAL_INPUT_TV] = SAA7115_COMPOSITE4,
[PVR2_CVAL_INPUT_RADIO] = SAA7115_COMPOSITE5,
@@ -61,15 +66,14 @@ static const int routing_scheme1[] = {
[PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, /* or SVIDEO0, it seems */
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
- [PVR2_ROUTING_SCHEME_ONAIR] = {
- .def = routing_scheme1,
- .cnt = ARRAY_SIZE(routing_scheme1),
- },
+static const struct routing_scheme routing_def1 = {
+ .def = routing_scheme1,
+ .cnt = ARRAY_SIZE(routing_scheme1),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
+ [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
};
void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -81,12 +85,12 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
hdw->input_val);
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- input = sp->def[hdw->input_val];
- } else {
+
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev v4l2 set_input:"
" Invalid routing scheme (%u)"
@@ -94,6 +98,7 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
sid, hdw->input_val);
return;
}
+ input = sp->def[hdw->input_val];
sd->ops->video->s_routing(sd, input, 0, 0);
}
}
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index f60de40fd21f..46e0d8ad880f 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -163,13 +163,6 @@
CICR0_EOFM | CICR0_FOM)
/*
- * YUV422P picture size should be a multiple of 16, so the heuristic aligns
- * height, width on 4 byte boundaries to reach the 16 multiple for the size.
- */
-#define YUV422P_X_Y_ALIGN 4
-#define YUV422P_SIZE_ALIGN YUV422P_X_Y_ALIGN * YUV422P_X_Y_ALIGN
-
-/*
* Structures
*/
enum pxa_camera_active_dma {
@@ -1398,28 +1391,15 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* limit to pxa hardware capabilities */
- if (pix->height < 32)
- pix->height = 32;
- if (pix->height > 2048)
- pix->height = 2048;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 2048)
- pix->width = 2048;
- pix->width &= ~0x01;
-
/*
- * YUV422P planar format requires images size to be a 16 bytes
- * multiple. If not, zeros will be inserted between Y and U planes, and
- * U and V planes, and YUV422P standard would be violated.
+ * Limit to pxa hardware capabilities. YUV422P planar format requires
+ * images size to be a multiple of 16 bytes. If not, zeros will be
+ * inserted between Y and U planes, and U and V planes, which violates
+ * the YUV422P standard.
*/
- if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P) {
- if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN))
- pix->height = ALIGN(pix->height, YUV422P_X_Y_ALIGN);
- if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN))
- pix->width = ALIGN(pix->width, YUV422P_X_Y_ALIGN);
- }
+ v4l_bound_align_image(&pix->width, 48, 2048, 1,
+ &pix->height, 32, 2048, 0,
+ xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
pix->bytesperline = pix->width *
DIV_ROUND_UP(xlate->host_fmt->depth, 8);
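
The comment above is the reason the YUV422P case passes a size alignment of 4 (2^4 = 16): if width*height is not a multiple of 16 bytes, zeros are inserted between the Y and U planes and between the U and V planes, breaking the YUV422P layout. A tiny user-space check of that constraint (not part of the patch; the sizes are hypothetical examples of what v4l_bound_align_image() would produce):

#include <assert.h>

int main(void)
{
	/* hypothetical sizes already bounded with salign = 4 */
	unsigned int width = 160, height = 120;

	/* image size is a multiple of 16 bytes, as required above,
	 * so no zeros are inserted between the planes */
	assert((width * height) % 16 == 0);
	return 0;
}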
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index e305c1674cee..ba87128542e0 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1640,15 +1640,8 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
}
f->fmt.pix.field = field;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index d369e8409ab8..0db88a53d92c 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -689,16 +689,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
/* FIXME: calculate using depth and bus width */
- if (f->fmt.pix.height < 4)
- f->fmt.pix.height = 4;
- if (f->fmt.pix.height > 1920)
- f->fmt.pix.height = 1920;
- if (f->fmt.pix.width < 2)
- f->fmt.pix.width = 2;
- if (f->fmt.pix.width > 2560)
- f->fmt.pix.width = 2560;
- f->fmt.pix.width &= ~0x01;
- f->fmt.pix.height &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 2, 2560, 1,
+ &f->fmt.pix.height, 4, 1920, 2, 0);
f->fmt.pix.bytesperline = f->fmt.pix.width *
DIV_ROUND_UP(xlate->host_fmt->depth, 8);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 16f595d4337a..9f5ae8167855 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -237,11 +237,11 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
return -ENOMEM;
icd->num_user_formats = fmts;
- fmts = 0;
dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts);
/* Second pass - actually fill data formats */
+ fmts = 0;
for (i = 0; i < icd->num_formats; i++)
if (!ici->ops->get_formats) {
icd->user_formats[i].host_fmt = icd->formats + i;
@@ -877,8 +877,11 @@ static int soc_camera_probe(struct device *dev)
(unsigned short)~0;
ret = soc_camera_init_user_formats(icd);
- if (ret < 0)
+ if (ret < 0) {
+ if (icd->ops->remove)
+ icd->ops->remove(icd);
goto eiufmt;
+ }
icd->height = DEFAULT_HEIGHT;
icd->width = DEFAULT_WIDTH;
@@ -902,8 +905,10 @@ static int soc_camera_remove(struct device *dev)
{
struct soc_camera_device *icd = to_soc_camera_dev(dev);
+ mutex_lock(&icd->video_lock);
if (icd->ops->remove)
icd->ops->remove(icd);
+ mutex_unlock(&icd->video_lock);
soc_camera_free_user_formats(icd);
@@ -1145,6 +1150,7 @@ evidallocd:
}
EXPORT_SYMBOL(soc_camera_video_start);
+/* Called from client .remove() methods with .video_lock held */
void soc_camera_video_stop(struct soc_camera_device *icd)
{
struct video_device *vdev = icd->vdev;
@@ -1154,10 +1160,8 @@ void soc_camera_video_stop(struct soc_camera_device *icd)
if (!icd->dev.parent || !vdev)
return;
- mutex_lock(&icd->video_lock);
video_unregister_device(vdev);
icd->vdev = NULL;
- mutex_unlock(&icd->video_lock);
}
EXPORT_SYMBOL(soc_camera_video_stop);
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index b30c49248217..b90e9da3167d 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -878,7 +878,7 @@ static int tcm825x_probe(struct i2c_client *client,
return rval;
}
-static int __exit tcm825x_remove(struct i2c_client *client)
+static int tcm825x_remove(struct i2c_client *client)
{
struct tcm825x_sensor *sensor = i2c_get_clientdata(client);
@@ -902,7 +902,7 @@ static struct i2c_driver tcm825x_i2c_driver = {
.name = TCM825X_NAME,
},
.probe = tcm825x_probe,
- .remove = __exit_p(tcm825x_remove),
+ .remove = tcm825x_remove,
.id_table = tcm825x_id,
};
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
index e4cb99c1f94b..adb1c044ad7d 100644
--- a/drivers/media/video/usbvideo/Kconfig
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -38,10 +38,13 @@ config USB_KONICAWC
module will be called konicawc.
config USB_QUICKCAM_MESSENGER
- tristate "USB Logitech Quickcam Messenger"
+ tristate "USB Logitech Quickcam Messenger (DEPRECATED)"
depends on VIDEO_V4L1
select VIDEO_USBVIDEO
---help---
+ This driver is DEPRECATED; please use the gspca stv06xx module
+ instead.
+
Say Y or M here to enable support for the USB Logitech Quickcam
Messenger webcam.
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index f96475626da7..b91d66a767d7 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -802,6 +802,17 @@ struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, as the .s_config op is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -852,6 +863,17 @@ struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev,
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, as the .s_config op is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -872,6 +894,89 @@ struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr);
+/* Load an i2c sub-device. */
+struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *module_name,
+ struct i2c_board_info *info, const unsigned short *probe_addrs)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+
+ BUG_ON(!v4l2_dev);
+
+ if (module_name)
+ request_module(module_name);
+
+ /* Create the i2c client */
+ if (info->addr == 0 && probe_addrs)
+ client = i2c_new_probed_device(adapter, info, probe_addrs);
+ else
+ client = i2c_new_device(adapter, info);
+
+ /* Note: by loading the module first we are certain that c->driver
+ will be set if the driver was found. If the module was not loaded
+ first, then the i2c core tries to delay-load the module for us,
+ and then c->driver is still NULL until the module is finally
+ loaded. This delay-load mechanism doesn't work if other drivers
+ want to use the i2c device, so explicitly loading the module
+ is the best alternative. */
+ if (client == NULL || client->driver == NULL)
+ goto error;
+
+ /* Lock the module so we can safely get the v4l2_subdev pointer */
+ if (!try_module_get(client->driver->driver.owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(client->driver->driver.owner);
+
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, as the .s_config op is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config,
+ info->irq, info->platform_data);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
+error:
+ /* If we have a client but no subdev, then something went wrong and
+ we must unregister the client. */
+ if (client && sd == NULL)
+ i2c_unregister_device(client);
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *module_name, const char *client_type,
+ int irq, void *platform_data,
+ u8 addr, const unsigned short *probe_addrs)
+{
+ struct i2c_board_info info;
+
+ /* Setup the i2c board info with the device type and
+ the device address. */
+ memset(&info, 0, sizeof(info));
+ strlcpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+ info.irq = irq;
+ info.platform_data = platform_data;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name,
+ &info, probe_addrs);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
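
The two helpers above differ only in how the i2c_board_info is obtained: v4l2_i2c_new_subdev_cfg() builds one from a client type, address, irq and platform data, while v4l2_i2c_new_subdev_board() takes a caller-prepared structure and can fall back to probing a list of addresses when info->addr is 0. A hedged sketch of how a bridge driver might use the board-info variant (the "saa7115" module/type, the 0x21 address and the pdata contents are hypothetical examples, not taken from this patch; <linux/i2c.h> and <media/v4l2-common.h> are assumed to be included):

/* Illustrative sketch only: attach a decoder subdev with platform data. */
static struct v4l2_subdev *attach_decoder(struct v4l2_device *v4l2_dev,
					  struct i2c_adapter *adap,
					  void *pdata, int irq)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("saa7115", 0x21),	/* hypothetical */
		.irq		= irq,
		.platform_data	= pdata,
	};

	/* probe_addrs is NULL, so info.addr is used directly; the core
	 * will pass irq and platform_data to the subdev's .s_config */
	return v4l2_i2c_new_subdev_board(v4l2_dev, adap, "saa7115",
					 &info, NULL);
}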
+
/* Return i2c client address of v4l2_subdev. */
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
{
@@ -916,4 +1021,78 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
}
EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
-#endif
+#endif /* defined(CONFIG_I2C) */
+
+/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
+ * and max don't have to be aligned, but there must be at least one valid
+ * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
+ * of 16 between 17 and 31. */
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int align)
+{
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+ /* Clamp to aligned value of min and max */
+ if (x < min)
+ x = (min + ~mask) & mask;
+ else if (x > max)
+ x = max & mask;
+
+ return x;
+}
+
+/* Bound an image to have a width between wmin and wmax, and height between
+ * hmin and hmax, inclusive. Additionally, the width will be a multiple of
+ * 2^walign, the height will be a multiple of 2^halign, and the overall size
+ * (width*height) will be a multiple of 2^salign. The image may be shrunk
+ * or enlarged to fit the alignment constraints.
+ *
+ * The width or height maximum must not be smaller than the corresponding
+ * minimum. The alignments must not be so high that there are no possible image
+ * sizes within the allowed bounds. wmin and hmin must be at least 1
+ * (don't use 0). If you don't care about a certain alignment, specify 0,
+ * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
+ * you only want to adjust downward, specify a maximum that's the same as
+ * the initial value.
+ */
+void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
+ unsigned int walign,
+ u32 *h, unsigned int hmin, unsigned int hmax,
+ unsigned int halign, unsigned int salign)
+{
+ *w = clamp_align(*w, wmin, wmax, walign);
+ *h = clamp_align(*h, hmin, hmax, halign);
+
+ /* Usually we don't need to align the size and are done now. */
+ if (!salign)
+ return;
+
+ /* How much alignment do we have? */
+ walign = __ffs(*w);
+ halign = __ffs(*h);
+ /* Enough to satisfy the image alignment? */
+ if (walign + halign < salign) {
+ /* Max walign where there is still a valid width */
+ unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
+ /* Max halign where there is still a valid height */
+ unsigned int hmaxa = __fls(hmax ^ (hmin - 1));
+
+ /* up the smaller alignment until we have enough */
+ do {
+ if (halign >= hmaxa ||
+ (walign <= halign && walign < wmaxa)) {
+ *w = clamp_align(*w, wmin, wmax, walign + 1);
+ walign = __ffs(*w);
+ } else {
+ *h = clamp_align(*h, hmin, hmax, halign + 1);
+ halign = __ffs(*h);
+ }
+ } while (halign + walign < salign);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l_bound_align_image);
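
This helper is what the driver conversions earlier in this patch (mt9m001, mt9t031, mt9v022, pxa_camera, saa7134, sh_mobile_ceu, vivi, w9968cf, zoran) switch to: a single call replaces the usual run of open-coded min/max clamps and mask operations in a try_fmt handler. A minimal sketch of the pattern (illustrative only; the bounds and alignments are hypothetical and not taken from any one driver, and <linux/videodev2.h> is assumed for struct v4l2_pix_format):

/* Illustrative sketch only: the typical try_fmt conversion in this patch. */
static void bound_try_fmt(struct v4l2_pix_format *pix)
{
	/*
	 * Replaces four open-coded min/max clamps plus a "width &= ~0x01"
	 * mask: width bounded to [48, 1280] and rounded to a multiple of
	 * 2 (walign = 1), height bounded to [32, 1024] with no alignment,
	 * and no overall size alignment requested (salign = 0).
	 */
	v4l_bound_align_image(&pix->width, 48, 1280, 1,
			      &pix->height, 32, 1024, 0, 0);
}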
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index fbfefae7886f..7705fc6baf00 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -343,6 +343,53 @@ static struct bar_std bars[] = {
#define TO_U(r, g, b) \
(((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
+/* precalculate color bar values to speed up rendering */
+static void precalculate_bars(struct vivi_fh *fh)
+{
+ struct vivi_dev *dev = fh->dev;
+ unsigned char r, g, b;
+ int k, is_yuv;
+
+ fh->input = dev->input;
+
+ for (k = 0; k < 8; k++) {
+ r = bars[fh->input].bar[k][0];
+ g = bars[fh->input].bar[k][1];
+ b = bars[fh->input].bar[k][2];
+ is_yuv = 0;
+
+ switch (fh->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ is_yuv = 1;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ r >>= 3;
+ g >>= 2;
+ b >>= 3;
+ break;
+ case V4L2_PIX_FMT_RGB555:
+ case V4L2_PIX_FMT_RGB555X:
+ r >>= 3;
+ g >>= 3;
+ b >>= 3;
+ break;
+ }
+
+ if (is_yuv) {
+ fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
+ fh->bars[k][1] = TO_U(r, g, b); /* Cb */
+ fh->bars[k][2] = TO_V(r, g, b); /* Cr */
+ } else {
+ fh->bars[k][0] = r;
+ fh->bars[k][1] = g;
+ fh->bars[k][2] = b;
+ }
+ }
+
+}
+
#define TSTAMP_MIN_Y 24
#define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15)
#define TSTAMP_INPUT_X 10
@@ -755,6 +802,8 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
buf->vb.height = fh->height;
buf->vb.field = field;
+ precalculate_bars(fh);
+
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
@@ -883,15 +932,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
maxh = norm_maxh();
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
@@ -900,53 +942,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-/* precalculate color bar values to speed up rendering */
-static void precalculate_bars(struct vivi_fh *fh)
-{
- struct vivi_dev *dev = fh->dev;
- unsigned char r, g, b;
- int k, is_yuv;
-
- fh->input = dev->input;
-
- for (k = 0; k < 8; k++) {
- r = bars[fh->input].bar[k][0];
- g = bars[fh->input].bar[k][1];
- b = bars[fh->input].bar[k][2];
- is_yuv = 0;
-
- switch (fh->fmt->fourcc) {
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
- is_yuv = 1;
- break;
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_RGB565X:
- r >>= 3;
- g >>= 2;
- b >>= 3;
- break;
- case V4L2_PIX_FMT_RGB555:
- case V4L2_PIX_FMT_RGB555X:
- r >>= 3;
- g >>= 3;
- b >>= 3;
- break;
- }
-
- if (is_yuv) {
- fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
- fh->bars[k][1] = TO_U(r, g, b); /* Cb */
- fh->bars[k][2] = TO_V(r, g, b); /* Cr */
- } else {
- fh->bars[k][0] = r;
- fh->bars[k][1] = g;
- fh->bars[k][2] = b;
- }
- }
-
-}
-
/*FIXME: This seems to be generic enough to be at videodev2 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
@@ -972,8 +967,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- precalculate_bars(fh);
-
ret = 0;
out:
mutex_unlock(&q->vb_lock);
@@ -1364,6 +1357,7 @@ static int __init vivi_create_instance(int inst)
goto unreg_dev;
*vfd = vivi_template;
+ vfd->debug = debug;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index f59b2bd07e89..6c3f23e31b5c 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -460,7 +460,7 @@ static int w9968cf_set_picture(struct w9968cf_device*, struct video_picture);
static int w9968cf_set_window(struct w9968cf_device*, struct video_window);
static int w9968cf_postprocess_frame(struct w9968cf_device*,
struct w9968cf_frame_t*);
-static int w9968cf_adjust_window_size(struct w9968cf_device*, u16* w, u16* h);
+static int w9968cf_adjust_window_size(struct w9968cf_device*, u32 *w, u32 *h);
static void w9968cf_init_framelist(struct w9968cf_device*);
static void w9968cf_push_frame(struct w9968cf_device*, u8 f_num);
static void w9968cf_pop_frame(struct w9968cf_device*,struct w9968cf_frame_t**);
@@ -1763,8 +1763,7 @@ w9968cf_set_window(struct w9968cf_device* cam, struct video_window win)
#define UNSC(x) ((x) >> 10)
/* Make sure we are using a supported resolution */
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width,
- (u16*)&win.height)))
+ if ((err = w9968cf_adjust_window_size(cam, &win.width, &win.height)))
goto error;
/* Scaling factors */
@@ -1914,12 +1913,9 @@ error:
Return 0 on success, -1 otherwise.
--------------------------------------------------------------------------*/
static int
-w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height)
+w9968cf_adjust_window_size(struct w9968cf_device *cam, u32 *width, u32 *height)
{
- u16 maxw, maxh;
-
- if ((*width < cam->minwidth) || (*height < cam->minheight))
- return -ERANGE;
+ unsigned int maxw, maxh, align;
maxw = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
w9968cf_vpp ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
@@ -1927,16 +1923,10 @@ w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height)
maxh = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
w9968cf_vpp ? max((u16)W9968CF_MAX_HEIGHT, cam->maxheight)
: cam->maxheight;
+ align = (cam->vpp_flag & VPP_DECOMPRESSION) ? 4 : 0;
- if (*width > maxw)
- *width = maxw;
- if (*height > maxh)
- *height = maxh;
-
- if (cam->vpp_flag & VPP_DECOMPRESSION) {
- *width &= ~15L; /* multiple of 16 */
- *height &= ~15L;
- }
+ v4l_bound_align_image(width, cam->minwidth, maxw, align,
+ height, cam->minheight, maxh, align, 0);
PDBGG("Window size adjusted w=%u, h=%u ", *width, *height)
@@ -3043,8 +3033,8 @@ static long w9968cf_v4l_ioctl(struct file *filp,
if (win.clipcount != 0 || win.flags != 0)
return -EINVAL;
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width,
- (u16*)&win.height))) {
+ if ((err = w9968cf_adjust_window_size(cam, &win.width,
+ &win.height))) {
DBG(4, "Resolution not supported (%ux%u). "
"VIDIOCSWIN failed", win.width, win.height)
return err;
@@ -3116,6 +3106,7 @@ static long w9968cf_v4l_ioctl(struct file *filp,
{
struct video_mmap mmap;
struct w9968cf_frame_t* fr;
+ u32 w, h;
int err = 0;
if (copy_from_user(&mmap, arg, sizeof(mmap)))
@@ -3164,8 +3155,10 @@ static long w9968cf_v4l_ioctl(struct file *filp,
}
}
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&mmap.width,
- (u16*)&mmap.height))) {
+ w = mmap.width; h = mmap.height;
+ err = w9968cf_adjust_window_size(cam, &w, &h);
+ mmap.width = w; mmap.height = h;
+ if (err) {
DBG(4, "Resolution not supported (%dx%d). "
"VIDIOCMCAPTURE failed",
mmap.width, mmap.height)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 643cccaa1aab..3d7df32a3d87 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -2088,16 +2088,10 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
return -EINVAL;
}
- bpp = (zoran_formats[i].depth + 7) / 8;
- fmt->fmt.pix.width &= ~((bpp == 2) ? 1 : 3);
- if (fmt->fmt.pix.width > BUZ_MAX_WIDTH)
- fmt->fmt.pix.width = BUZ_MAX_WIDTH;
- if (fmt->fmt.pix.width < BUZ_MIN_WIDTH)
- fmt->fmt.pix.width = BUZ_MIN_WIDTH;
- if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
- fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
- if (fmt->fmt.pix.height < BUZ_MIN_HEIGHT)
- fmt->fmt.pix.height = BUZ_MIN_HEIGHT;
+ bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
+ v4l_bound_align_image(
+ &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
+ &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
mutex_unlock(&zr->resource_lock);
return 0;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 20e0b447e8e8..55ff25244af4 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -3518,7 +3518,7 @@ retry_page:
} else
mptsas_volume_delete(ioc, sas_info->fw.id);
}
- mutex_lock(&ioc->sas_device_info_mutex);
+ mutex_unlock(&ioc->sas_device_info_mutex);
/* expanders */
mutex_lock(&ioc->sas_topology_mutex);
@@ -3549,7 +3549,7 @@ retry_page:
goto redo_expander_scan;
}
}
- mutex_lock(&ioc->sas_topology_mutex);
+ mutex_unlock(&ioc->sas_topology_mutex);
}
/**
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 7ac12cb0be4a..5b6e58a3ba46 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -32,8 +32,7 @@
* This driver was tested with firmware revision A4.
*/
-#if defined(CONFIG_KEYBOARD_DM355EVM) \
- || defined(CONFIG_KEYBOARD_DM355EVM_MODULE)
+#if defined(CONFIG_INPUT_DM355EVM) || defined(CONFIG_INPUT_DM355EVM_MODULE)
#define msp_has_keyboard() true
#else
#define msp_has_keyboard() false
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 671a7efe86a8..c1de4afa89a6 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -238,8 +238,10 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
mutex_lock(&pcap->adc_mutex);
req = pcap->adc_queue[pcap->adc_head];
- if (WARN(!req, KERN_WARNING "adc irq without pending request\n"))
+ if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
+ mutex_unlock(&pcap->adc_mutex);
return IRQ_HANDLED;
+ }
/* read requested channels results */
ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 4c7b7962f6b8..0cc5eeff5ee8 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -367,7 +367,8 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
break;
default:
- return -1;
+ gate = -1;
+ goto already;
}
writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index cd1008c19cd7..ca54996ffd0e 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -101,6 +101,12 @@
#define twl_has_usb() false
#endif
+#if defined(CONFIG_TWL4030_WATCHDOG) || \
+ defined(CONFIG_TWL4030_WATCHDOG_MODULE)
+#define twl_has_watchdog() true
+#else
+#define twl_has_watchdog() false
+#endif
/* Triton Core internal information (BEGIN) */
@@ -526,6 +532,12 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
usb_transceiver = child;
}
+ if (twl_has_watchdog()) {
+ child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
if (twl_has_regulator()) {
/*
child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 8d1c60a3f0df..5d778ec8cdb2 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -235,7 +235,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
skb->ip_summed = CHECKSUM_UNNECESSARY;
dev_dbg(xpnet, "passing skb to network layer\n"
- KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+ "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n",
(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->len);
@@ -399,7 +399,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
msg->buf_pa = xp_pa((void *)start_addr);
dev_dbg(xpnet, "sending XPC message to %d:%d\n"
- KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
+ "msg->buf_pa=0x%lx, msg->size=%u, "
"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 40111a6d8d5b..891ef18bd77b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -94,6 +94,31 @@ config MMC_SDHCI_PLTFM
If unsure, say N.
+config MMC_SDHCI_S3C
+ tristate "SDHCI support on Samsung S3C SoC"
+ depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referred to as the HSMMC block in some of the Samsung S3C
+ range of SoCs.
+
+ Note: due to problems with DMA, DMA support is only available
+ when CONFIG_EXPERIMENTAL is selected.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_S3C_DMA
+ bool "DMA support on S3C SDHCI"
+ depends on MMC_SDHCI_S3C && EXPERIMENTAL
+ help
+ Enable DMA support on the Samsung S3C SDHCI glue. The DMA
+ has proved to be problematic if the controller encounters
+ certain errors, and thus should be treated with care.
+
+ YMMV.
+
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
@@ -265,3 +290,14 @@ config MMC_CB710
This driver can also be built as a module. If so, the module
will be called cb710-mmc.
+config MMC_VIA_SDMMC
+ tristate "VIA SD/MMC Card Reader Driver"
+ depends on PCI
+ help
+ This selects the VIA SD/MMC Card Reader driver, say Y or M here.
+ VIA provides a multi-function card reader which is integrated into
+ some motherboards manufactured by VIA. This card reader supports
+ SD/MMC/SDHC.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 79da397c5fea..cf153f628457 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_MMC_S3C) += s3cmci.o
obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
+obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 240608cc7ae9..a461017ce5ce 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_spi_host *host;
int status;
+ /* We rely on full duplex transfers, mostly to reduce
+ * per-transfer overheads (by making fewer transfers).
+ */
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
/* MMC and SD specs only seem to care that sampling is on the
* rising edge ... meaning SPI modes 0 or 3. So either SPI mode
* should be legit. We'll use mode 0 since the steady state is 0,
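The mmc_spi hunk above rejects controllers that cannot do full-duplex transfers. For context, a controller driver advertises that limitation through the master->flags field this check reads; the sketch below is a minimal, hypothetical probe (example_spi and its fields are invented for illustration, not taken from any in-tree driver) showing where SPI_MASTER_HALF_DUPLEX would be set.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_spi {
	void __iomem *regs;		/* placeholder controller state */
};

/* Hypothetical probe for a half-duplex-only SPI controller. */
static int __devinit example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct example_spi));
	if (!master)
		return -ENOMEM;

	/* TX and RX cannot happen in the same transfer, so clients that
	 * need full duplex (such as mmc_spi above) will refuse to bind. */
	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->bus_num = pdev->id;
	master->num_chipselect = 1;

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);
	return ret;
}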
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 4eb4f37544ab..8c08cd7efa7f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -794,7 +794,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
host->mem->start + host->sdidata);
if (!setup_ok) {
- s3c2410_dma_config(host->dma, 4, 0);
+ s3c2410_dma_config(host->dma, 4);
s3c2410_dma_set_buffdone_fn(host->dma,
s3cmci_dma_done_callback);
s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 128c614d11aa..d79fa55c3b89 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -250,6 +250,9 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
host->ops = &sdhci_of_data->ops;
}
+ if (of_get_property(np, "sdhci,1-bit-only", NULL))
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+
clk = of_get_property(np, "clock-frequency", &size);
if (clk && size == sizeof(*clk) && *clk)
of_host->clock = *clk;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 65be27995d5c..2f15cc17d887 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -284,6 +284,18 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
.resume = jmicron_resume,
};
+static int via_probe(struct sdhci_pci_chip *chip)
+{
+ if (chip->pdev->revision == 0x10)
+ chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_via = {
+ .probe = via_probe,
+};
+
static const struct pci_device_id pci_ids[] __devinitdata = {
{
.vendor = PCI_VENDOR_ID_RICOH,
@@ -349,6 +361,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
},
+ {
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = 0x95d0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_via,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
new file mode 100644
index 000000000000..50997d2a63e7
--- /dev/null
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -0,0 +1,428 @@
+/* linux/drivers/mmc/host/sdhci-s3c.c
+ *
+ * Copyright 2008 Openmoko Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * SDHCI (HSMMC) support for Samsung SoC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/mmc/host.h>
+
+#include <plat/sdhci.h>
+#include <plat/regs-sdhci.h>
+
+#include "sdhci.h"
+
+#define MAX_BUS_CLK (4)
+
+/**
+ * struct sdhci_s3c - S3C SDHCI instance
+ * @host: The SDHCI host created
+ * @pdev: The platform device we were created from.
+ * @ioarea: The resource created when we claimed the IO area.
+ * @pdata: The platform data for this controller.
+ * @cur_clk: The index of the current bus clock.
+ * @clk_io: The clock for the internal bus interface.
+ * @clk_bus: The clocks that are available for the SD/MMC bus clock.
+ */
+struct sdhci_s3c {
+ struct sdhci_host *host;
+ struct platform_device *pdev;
+ struct resource *ioarea;
+ struct s3c_sdhci_platdata *pdata;
+ unsigned int cur_clk;
+
+ struct clk *clk_io;
+ struct clk *clk_bus[MAX_BUS_CLK];
+};
+
+static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
+{
+ return sdhci_priv(host);
+}
+
+/**
+ * get_curclk - convert ctrl2 register to clock source number
+ * @ctrl2: Control2 register value.
+ */
+static u32 get_curclk(u32 ctrl2)
+{
+ ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+
+ return ctrl2;
+}
+
+static void sdhci_s3c_check_sclk(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+
+ if (get_curclk(tmp) != ourhost->cur_clk) {
+ dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n");
+
+ tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+ writel(tmp, host->ioaddr + S3C_SDHCI_CONTROL2);
+ }
+}
+
+/**
+ * sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
+ * @host: The SDHCI host instance.
+ *
+ * Callback to return the maximum clock rate achievable by the controller.
+ */
+static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ struct clk *busclk;
+ unsigned int rate, max;
+ int clk;
+
+ /* note, a reset will reset the clock source */
+
+ sdhci_s3c_check_sclk(host);
+
+ for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
+ busclk = ourhost->clk_bus[clk];
+ if (!busclk)
+ continue;
+
+ rate = clk_get_rate(busclk);
+ if (rate > max)
+ max = rate;
+ }
+
+ return max;
+}
+
+static unsigned int sdhci_s3c_get_timeout_clk(struct sdhci_host *host)
+{
+ return sdhci_s3c_get_max_clk(host) / 1000000;
+}
+
+/**
+ * sdhci_s3c_consider_clock - consider one of the bus clocks for the current setting
+ * @ourhost: Our SDHCI instance.
+ * @src: The source clock index.
+ * @wanted: The clock frequency wanted.
+ */
+static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
+ unsigned int src,
+ unsigned int wanted)
+{
+ unsigned long rate;
+ struct clk *clksrc = ourhost->clk_bus[src];
+ int div;
+
+ if (!clksrc)
+ return UINT_MAX;
+
+ rate = clk_get_rate(clksrc);
+
+ for (div = 1; div < 256; div *= 2) {
+ if ((rate / div) <= wanted)
+ break;
+ }
+
+ dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
+ src, rate, wanted, rate / div);
+
+ return (wanted - (rate / div));
+}
+
+/**
+ * sdhci_s3c_set_clock - callback on clock change
+ * @host: The SDHCI host being changed
+ * @clock: The clock rate being requested.
+ *
+ * When the card's clock is going to be changed, look at the new frequency
+ * and find the best clock source to go with it.
+ */
+static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned int best = UINT_MAX;
+ unsigned int delta;
+ int best_src = 0;
+ int src;
+ u32 ctrl;
+
+ /* don't bother if the clock is going off. */
+ if (clock == 0)
+ return;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ delta = sdhci_s3c_consider_clock(ourhost, src, clock);
+ if (delta < best) {
+ best = delta;
+ best_src = src;
+ }
+ }
+
+ dev_dbg(&ourhost->pdev->dev,
+ "selected source %d, clock %d, delta %d\n",
+ best_src, clock, best);
+
+ /* select the new clock source */
+
+ if (ourhost->cur_clk != best_src) {
+ struct clk *clk = ourhost->clk_bus[best_src];
+
+ /* turn clock off to card before changing clock source */
+ writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+ ourhost->cur_clk = best_src;
+ host->max_clk = clk_get_rate(clk);
+ host->timeout_clk = sdhci_s3c_get_timeout_clk(host);
+
+ ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+ ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+ }
+
+ /* reconfigure the hardware for new clock rate */
+
+ {
+ struct mmc_ios ios;
+
+ ios.clock = clock;
+
+ if (ourhost->pdata->cfg_card)
+ (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
+ &ios, NULL);
+ }
+}
+
+static struct sdhci_ops sdhci_s3c_ops = {
+ .get_max_clock = sdhci_s3c_get_max_clk,
+ .get_timeout_clock = sdhci_s3c_get_timeout_clk,
+ .set_clock = sdhci_s3c_set_clock,
+};
+
+static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
+{
+ struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_s3c *sc;
+ struct resource *res;
+ int ret, irq, ptr, clks;
+
+ if (!pdata) {
+ dev_err(dev, "no device data specified\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq specified\n");
+ return irq;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no memory specified\n");
+ return -ENOENT;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
+ if (IS_ERR(host)) {
+ dev_err(dev, "sdhci_alloc_host() failed\n");
+ return PTR_ERR(host);
+ }
+
+ sc = sdhci_priv(host);
+
+ sc->host = host;
+ sc->pdev = pdev;
+ sc->pdata = pdata;
+
+ platform_set_drvdata(pdev, host);
+
+ sc->clk_io = clk_get(dev, "hsmmc");
+ if (IS_ERR(sc->clk_io)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(sc->clk_io);
+ goto err_io_clk;
+ }
+
+ /* enable the local io clock and keep it running for the moment. */
+ clk_enable(sc->clk_io);
+
+ for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ struct clk *clk;
+ char *name = pdata->clocks[ptr];
+
+ if (name == NULL)
+ continue;
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock %s\n", name);
+ continue;
+ }
+
+ clks++;
+ sc->clk_bus[ptr] = clk;
+ clk_enable(clk);
+
+ dev_info(dev, "clock source %d: %s (%ld Hz)\n",
+ ptr, name, clk_get_rate(clk));
+ }
+
+ if (clks == 0) {
+ dev_err(dev, "failed to find any bus clocks\n");
+ ret = -ENOENT;
+ goto err_no_busclks;
+ }
+
+ sc->ioarea = request_mem_region(res->start, resource_size(res),
+ mmc_hostname(host->mmc));
+ if (!sc->ioarea) {
+ dev_err(dev, "failed to reserve register area\n");
+ ret = -ENXIO;
+ goto err_req_regs;
+ }
+
+ host->ioaddr = ioremap_nocache(res->start, resource_size(res));
+ if (!host->ioaddr) {
+ dev_err(dev, "failed to map registers\n");
+ ret = -ENXIO;
+ goto err_req_regs;
+ }
+
+ /* Ensure we have minimal gpio selected CMD/CLK/Detect */
+ if (pdata->cfg_gpio)
+ pdata->cfg_gpio(pdev, pdata->max_width);
+
+ host->hw_name = "samsung-hsmmc";
+ host->ops = &sdhci_s3c_ops;
+ host->quirks = 0;
+ host->irq = irq;
+
+ /* Setup quirks for the controller */
+
+ /* Currently with ADMA enabled we are getting some length
+ * interrupts that are not being dealt with, so disable
+ * ADMA until this is sorted out. */
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+ host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
+
+#ifndef CONFIG_MMC_SDHCI_S3C_DMA
+
+ /* we currently see overruns on errors, so disable the SDMA
+ * support as well. */
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+ /* PIO currently has problems with multi-block IO */
+ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+
+#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
+
+ /* It seems we do not get a DATA transfer complete interrupt on non-busy
+ * transfers, not sure if this is a problem with this specific
+ * SDHCI block, or a missing configuration that needs to be set. */
+ host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+
+ host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE);
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(dev, "sdhci_add_host() failed\n");
+ goto err_add_host;
+ }
+
+ return 0;
+
+ err_add_host:
+ release_resource(sc->ioarea);
+ kfree(sc->ioarea);
+
+ err_req_regs:
+ for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+ }
+ }
+
+ err_no_busclks:
+ clk_disable(sc->clk_io);
+ clk_put(sc->clk_io);
+
+ err_io_clk:
+ sdhci_free_host(host);
+
+ return ret;
+}
+
+static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ sdhci_suspend_host(host, pm);
+ return 0;
+}
+
+static int sdhci_s3c_resume(struct platform_device *dev)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ sdhci_resume_host(host);
+ return 0;
+}
+
+#else
+#define sdhci_s3c_suspend NULL
+#define sdhci_s3c_resume NULL
+#endif
+
+static struct platform_driver sdhci_s3c_driver = {
+ .probe = sdhci_s3c_probe,
+ .remove = __devexit_p(sdhci_s3c_remove),
+ .suspend = sdhci_s3c_suspend,
+ .resume = sdhci_s3c_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s3c-sdhci",
+ },
+};
+
+static int __init sdhci_s3c_init(void)
+{
+ return platform_driver_register(&sdhci_s3c_driver);
+}
+
+static void __exit sdhci_s3c_exit(void)
+{
+ platform_driver_unregister(&sdhci_s3c_driver);
+}
+
+module_init(sdhci_s3c_init);
+module_exit(sdhci_s3c_exit);
+
+MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 35789c6edc19..6779b4ecab18 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -584,7 +584,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
* longer to time out, but that's much better than having a too-short
* timeout value.
*/
- if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
+ if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return 0xE;
/* timeout in us */
@@ -1051,12 +1051,19 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
* At least the Marvell CaFe chip gets confused if we set the voltage
* and set turn on power at the same time, so set the voltage first.
*/
- if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
+ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+ /*
+ * Some controllers need an extra 10ms delay after power is applied
+ * before they can apply the clock
+ */
+ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+ mdelay(10);
}
/*****************************************************************************\
@@ -1382,6 +1389,35 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
sdhci_finish_command(host);
}
+#ifdef DEBUG
+static void sdhci_show_adma_error(struct sdhci_host *host)
+{
+ const char *name = mmc_hostname(host->mmc);
+ u8 *desc = host->adma_desc;
+ __le32 *dma;
+ __le16 *len;
+ u8 attr;
+
+ sdhci_dumpregs(host);
+
+ while (true) {
+ dma = (__le32 *)(desc + 4);
+ len = (__le16 *)(desc + 2);
+ attr = *desc;
+
+ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
+
+ desc += 8;
+
+ if (attr & 2)
+ break;
+ }
+}
+#else
+static void sdhci_show_adma_error(struct sdhci_host *host) { }
+#endif
+
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
BUG_ON(intmask == 0);
@@ -1411,8 +1447,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
host->data->error = -ETIMEDOUT;
else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
host->data->error = -EILSEQ;
- else if (intmask & SDHCI_INT_ADMA_ERROR)
+ else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
+ sdhci_show_adma_error(host);
host->data->error = -EIO;
+ }
if (host->data->error)
sdhci_finish_data(host);
@@ -1729,7 +1768,10 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->ops = &sdhci_ops;
mmc->f_min = host->max_clk / 256;
mmc->f_max = host->max_clk;
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->caps = MMC_CAP_SDIO_IRQ;
+
+ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
if (caps & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1802,7 +1844,7 @@ int sdhci_add_host(struct sdhci_host *host)
/*
* Maximum block count.
*/
- mmc->max_blk_count = 65535;
+ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
/*
* Init tasklets.
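The new sdhci_show_adma_error() above steps through 8-byte ADMA2 descriptors: the attribute byte at offset 0 (bit 1 marks the end of the table), a 16-bit length at offset 2 and a 32-bit address at offset 4. Below is a user-space sketch of that walk; it assumes a little-endian host, and the attribute values in the test table are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* One 8-byte ADMA2 descriptor, as dumped by sdhci_show_adma_error().
 * The memcpy() decode below assumes a little-endian host. */
struct adma2_desc {
	uint8_t  attr;		/* bit 1 = END of descriptor table */
	uint8_t  reserved;
	uint16_t len;
	uint32_t addr;
};

static void dump_adma_table(const uint8_t *desc)
{
	struct adma2_desc d;

	for (;;) {
		memcpy(&d, desc, sizeof(d));
		printf("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
		       (const void *)desc, (unsigned)d.addr, d.len, d.attr);
		if (d.attr & 2)		/* END bit terminates the walk */
			break;
		desc += 8;
	}
}

int main(void)
{
	/* two descriptors; the attribute values are illustrative */
	uint8_t table[16] = {
		0x21, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, 0x00, /* 512 bytes @ 0x1000 */
		0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* END marker */
	};

	dump_adma_table(table);
	return 0;
}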
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2de08349c3ca..831ddf7dcb49 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -226,6 +226,12 @@ struct sdhci_host {
#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
/* Controller has to be forced to use block size of 2048 bytes */
#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
+/* Controller cannot do multi-block transfers */
+#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
+/* Controller can only handle 1-bit data transfers */
+#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
+/* Controller needs 10ms delay between applying power and clock */
+#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
new file mode 100644
index 000000000000..632858a94376
--- /dev/null
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -0,0 +1,1362 @@
+/*
+ * drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver
+ * Copyright (c) 2008, VIA Technologies Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+
+#include <linux/mmc/host.h>
+
+#define DRV_NAME "via_sdmmc"
+
+#define PCI_DEVICE_ID_VIA_9530 0x9530
+
+#define VIA_CRDR_SDC_OFF 0x200
+#define VIA_CRDR_DDMA_OFF 0x400
+#define VIA_CRDR_PCICTRL_OFF 0x600
+
+#define VIA_CRDR_MIN_CLOCK 375000
+#define VIA_CRDR_MAX_CLOCK 48000000
+
+/*
+ * PCI registers
+ */
+
+#define VIA_CRDR_PCI_WORK_MODE 0x40
+#define VIA_CRDR_PCI_DBG_MODE 0x41
+
+/*
+ * SDC MMIO Registers
+ */
+
+#define VIA_CRDR_SDCTRL 0x0
+#define VIA_CRDR_SDCTRL_START 0x01
+#define VIA_CRDR_SDCTRL_WRITE 0x04
+#define VIA_CRDR_SDCTRL_SINGLE_WR 0x10
+#define VIA_CRDR_SDCTRL_SINGLE_RD 0x20
+#define VIA_CRDR_SDCTRL_MULTI_WR 0x30
+#define VIA_CRDR_SDCTRL_MULTI_RD 0x40
+#define VIA_CRDR_SDCTRL_STOP 0x70
+
+#define VIA_CRDR_SDCTRL_RSP_NONE 0x0
+#define VIA_CRDR_SDCTRL_RSP_R1 0x10000
+#define VIA_CRDR_SDCTRL_RSP_R2 0x20000
+#define VIA_CRDR_SDCTRL_RSP_R3 0x30000
+#define VIA_CRDR_SDCTRL_RSP_R1B 0x90000
+
+#define VIA_CRDR_SDCARG 0x4
+
+#define VIA_CRDR_SDBUSMODE 0x8
+#define VIA_CRDR_SDMODE_4BIT 0x02
+#define VIA_CRDR_SDMODE_CLK_ON 0x40
+
+#define VIA_CRDR_SDBLKLEN 0xc
+/*
+ * Bit 0 - Bit 10 : Block length. So, the maximum block length should be 2048.
+ * Bit 11 - Bit 13 : Reserved.
+ * GPIDET : Select GPI pin to detect card, GPI means CR_CD# in top design.
+ * INTEN : Enable SD host interrupt.
+ * Bit 16 - Bit 31 : Block count. So, the maximum block count should be 65536.
+ */
+#define VIA_CRDR_SDBLKLEN_GPIDET 0x2000
+#define VIA_CRDR_SDBLKLEN_INTEN 0x8000
+#define VIA_CRDR_MAX_BLOCK_COUNT 65536
+#define VIA_CRDR_MAX_BLOCK_LENGTH 2048
+
+#define VIA_CRDR_SDRESP0 0x10
+#define VIA_CRDR_SDRESP1 0x14
+#define VIA_CRDR_SDRESP2 0x18
+#define VIA_CRDR_SDRESP3 0x1c
+
+#define VIA_CRDR_SDCURBLKCNT 0x20
+
+#define VIA_CRDR_SDINTMASK 0x24
+/*
+ * MBDIE : Multiple Blocks transfer Done Interrupt Enable
+ * BDDIE : Block Data transfer Done Interrupt Enable
+ * CIRIE : Card Insertion or Removal Interrupt Enable
+ * CRDIE : Command-Response transfer Done Interrupt Enable
+ * CRTOIE : Command-Response response TimeOut Interrupt Enable
+ * ASCRDIE : Auto Stop Command-Response transfer Done Interrupt Enable
+ * DTIE : Data access Timeout Interrupt Enable
+ * SCIE : reSponse CRC error Interrupt Enable
+ * RCIE : Read data CRC error Interrupt Enable
+ * WCIE : Write data CRC error Interrupt Enable
+ */
+#define VIA_CRDR_SDINTMASK_MBDIE 0x10
+#define VIA_CRDR_SDINTMASK_BDDIE 0x20
+#define VIA_CRDR_SDINTMASK_CIRIE 0x80
+#define VIA_CRDR_SDINTMASK_CRDIE 0x200
+#define VIA_CRDR_SDINTMASK_CRTOIE 0x400
+#define VIA_CRDR_SDINTMASK_ASCRDIE 0x800
+#define VIA_CRDR_SDINTMASK_DTIE 0x1000
+#define VIA_CRDR_SDINTMASK_SCIE 0x2000
+#define VIA_CRDR_SDINTMASK_RCIE 0x4000
+#define VIA_CRDR_SDINTMASK_WCIE 0x8000
+
+#define VIA_CRDR_SDACTIVE_INTMASK \
+ (VIA_CRDR_SDINTMASK_MBDIE | VIA_CRDR_SDINTMASK_CIRIE \
+ | VIA_CRDR_SDINTMASK_CRDIE | VIA_CRDR_SDINTMASK_CRTOIE \
+ | VIA_CRDR_SDINTMASK_DTIE | VIA_CRDR_SDINTMASK_SCIE \
+ | VIA_CRDR_SDINTMASK_RCIE | VIA_CRDR_SDINTMASK_WCIE)
+
+#define VIA_CRDR_SDSTATUS 0x28
+/*
+ * CECC : Reserved
+ * WP : SD card Write Protect status
+ * SLOTD : Reserved
+ * SLOTG : SD SLOT status(Gpi pin status)
+ * MBD : Multiple Blocks transfer Done interrupt status
+ * BDD : Block Data transfer Done interrupt status
+ * CD : Reserved
+ * CIR : Card Insertion or Removal interrupt detected on GPI pin
+ * IO : Reserved
+ * CRD : Command-Response transfer Done interrupt status
+ * CRTO : Command-Response response TimeOut interrupt status
+ * ASCRDIE : Auto Stop Command-Response transfer Done interrupt status
+ * DT : Data access Timeout interrupt status
+ * SC : reSponse CRC error interrupt status
+ * RC : Read data CRC error interrupt status
+ * WC : Write data CRC error interrupt status
+ */
+#define VIA_CRDR_SDSTS_CECC 0x01
+#define VIA_CRDR_SDSTS_WP 0x02
+#define VIA_CRDR_SDSTS_SLOTD 0x04
+#define VIA_CRDR_SDSTS_SLOTG 0x08
+#define VIA_CRDR_SDSTS_MBD 0x10
+#define VIA_CRDR_SDSTS_BDD 0x20
+#define VIA_CRDR_SDSTS_CD 0x40
+#define VIA_CRDR_SDSTS_CIR 0x80
+#define VIA_CRDR_SDSTS_IO 0x100
+#define VIA_CRDR_SDSTS_CRD 0x200
+#define VIA_CRDR_SDSTS_CRTO 0x400
+#define VIA_CRDR_SDSTS_ASCRDIE 0x800
+#define VIA_CRDR_SDSTS_DT 0x1000
+#define VIA_CRDR_SDSTS_SC 0x2000
+#define VIA_CRDR_SDSTS_RC 0x4000
+#define VIA_CRDR_SDSTS_WC 0x8000
+
+#define VIA_CRDR_SDSTS_IGN_MASK\
+ (VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_IO)
+#define VIA_CRDR_SDSTS_INT_MASK \
+ (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_CD \
+ | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_IO | VIA_CRDR_SDSTS_CRD \
+ | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+#define VIA_CRDR_SDSTS_W1C_MASK \
+ (VIA_CRDR_SDSTS_CECC | VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD \
+ | VIA_CRDR_SDSTS_CD | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_CRD \
+ | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+#define VIA_CRDR_SDSTS_CMD_MASK \
+ (VIA_CRDR_SDSTS_CRD | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_SC)
+#define VIA_CRDR_SDSTS_DATA_MASK\
+ (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+
+#define VIA_CRDR_SDSTATUS2 0x2a
+/*
+ * CFE : Enable SD host automatic Clock Freezing
+ */
+#define VIA_CRDR_SDSTS_CFE 0x80
+
+#define VIA_CRDR_SDRSPTMO 0x2C
+
+#define VIA_CRDR_SDCLKSEL 0x30
+
+#define VIA_CRDR_SDEXTCTRL 0x34
+#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SD 0x01
+#define VIS_CRDR_SDEXTCTRL_SHIFT_9 0x02
+#define VIS_CRDR_SDEXTCTRL_MMC_8BIT 0x04
+#define VIS_CRDR_SDEXTCTRL_RELD_BLK 0x08
+#define VIS_CRDR_SDEXTCTRL_BAD_CMDA 0x10
+#define VIS_CRDR_SDEXTCTRL_BAD_DATA 0x20
+#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SPI 0x40
+#define VIA_CRDR_SDEXTCTRL_HISPD 0x80
+/* 0x38-0xFF reserved */
+
+/*
+ * Data DMA Control Registers
+ */
+
+#define VIA_CRDR_DMABASEADD 0x0
+#define VIA_CRDR_DMACOUNTER 0x4
+
+#define VIA_CRDR_DMACTRL 0x8
+/*
+ * DIR :Transaction Direction
+ * 0 : From card to memory
+ * 1 : From memory to card
+ */
+#define VIA_CRDR_DMACTRL_DIR 0x100
+#define VIA_CRDR_DMACTRL_ENIRQ 0x10000
+#define VIA_CRDR_DMACTRL_SFTRST 0x1000000
+
+#define VIA_CRDR_DMASTS 0xc
+
+#define VIA_CRDR_DMASTART 0x10
+/*0x14-0xFF reserved*/
+
+/*
+ * PCI Control Registers
+ */
+
+/*0x0 - 0x1 reserved*/
+#define VIA_CRDR_PCICLKGATT 0x2
+/*
+ * SFTRST :
+ * 0 : Soft reset the whole controller; it will be de-asserted automatically
+ * 1 : Soft reset is de-asserted
+ */
+#define VIA_CRDR_PCICLKGATT_SFTRST 0x01
+/*
+ * 3V3 : Pad power select
+ * 0 : 1.8V
+ * 1 : 3.3V
+ * NOTE : No matter what the actual value is, this bit always
+ * reads as 0. This is a hardware bug.
+ */
+#define VIA_CRDR_PCICLKGATT_3V3 0x10
+/*
+ * PAD_PWRON : Pad Power on/off select
+ * 0 : Power off
+ * 1 : Power on
+ * NOTE : No matter what the actual value is, this bit always
+ * reads as 0. This is a hardware bug.
+ */
+#define VIA_CRDR_PCICLKGATT_PAD_PWRON 0x20
+
+#define VIA_CRDR_PCISDCCLK 0x5
+
+#define VIA_CRDR_PCIDMACLK 0x7
+#define VIA_CRDR_PCIDMACLK_SDC 0x2
+
+#define VIA_CRDR_PCIINTCTRL 0x8
+#define VIA_CRDR_PCIINTCTRL_SDCIRQEN 0x04
+
+#define VIA_CRDR_PCIINTSTATUS 0x9
+#define VIA_CRDR_PCIINTSTATUS_SDC 0x04
+
+#define VIA_CRDR_PCITMOCTRL 0xa
+#define VIA_CRDR_PCITMOCTRL_NO 0x0
+#define VIA_CRDR_PCITMOCTRL_32US 0x1
+#define VIA_CRDR_PCITMOCTRL_256US 0x2
+#define VIA_CRDR_PCITMOCTRL_1024US 0x3
+#define VIA_CRDR_PCITMOCTRL_256MS 0x4
+#define VIA_CRDR_PCITMOCTRL_512MS 0x5
+#define VIA_CRDR_PCITMOCTRL_1024MS 0x6
+
+/*0xB-0xFF reserved*/
+
+enum PCI_HOST_CLK_CONTROL {
+ PCI_CLK_375K = 0x03,
+ PCI_CLK_8M = 0x04,
+ PCI_CLK_12M = 0x00,
+ PCI_CLK_16M = 0x05,
+ PCI_CLK_24M = 0x01,
+ PCI_CLK_33M = 0x06,
+ PCI_CLK_48M = 0x02
+};
+
+struct sdhcreg {
+ u32 sdcontrol_reg;
+ u32 sdcmdarg_reg;
+ u32 sdbusmode_reg;
+ u32 sdblklen_reg;
+ u32 sdresp_reg[4];
+ u32 sdcurblkcnt_reg;
+ u32 sdintmask_reg;
+ u32 sdstatus_reg;
+ u32 sdrsptmo_reg;
+ u32 sdclksel_reg;
+ u32 sdextctrl_reg;
+};
+
+struct pcictrlreg {
+ u8 reserve[2];
+ u8 pciclkgat_reg;
+ u8 pcinfcclk_reg;
+ u8 pcimscclk_reg;
+ u8 pcisdclk_reg;
+ u8 pcicaclk_reg;
+ u8 pcidmaclk_reg;
+ u8 pciintctrl_reg;
+ u8 pciintstatus_reg;
+ u8 pcitmoctrl_reg;
+ u8 Resv;
+};
+
+struct via_crdr_mmc_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ void __iomem *mmiobase;
+ void __iomem *sdhc_mmiobase;
+ void __iomem *ddma_mmiobase;
+ void __iomem *pcictrl_mmiobase;
+
+ struct pcictrlreg pm_pcictrl_reg;
+ struct sdhcreg pm_sdhc_reg;
+
+ struct work_struct carddet_work;
+ struct tasklet_struct finish_tasklet;
+
+ struct timer_list timer;
+ spinlock_t lock;
+ u8 power;
+ int reject;
+ unsigned int quirks;
+};
+
+/* some devices need a very long delay for power to stabilize */
+#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
+
+static struct pci_device_id via_ids[] = {
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, via_ids);
+
+static void via_print_sdchc(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase = host->sdhc_mmiobase;
+
+ pr_debug("SDC MMIO Registers:\n");
+ pr_debug("SDCONTROL=%08x, SDCMDARG=%08x, SDBUSMODE=%08x\n",
+ readl(addrbase + VIA_CRDR_SDCTRL),
+ readl(addrbase + VIA_CRDR_SDCARG),
+ readl(addrbase + VIA_CRDR_SDBUSMODE));
+ pr_debug("SDBLKLEN=%08x, SDCURBLKCNT=%08x, SDINTMASK=%08x\n",
+ readl(addrbase + VIA_CRDR_SDBLKLEN),
+ readl(addrbase + VIA_CRDR_SDCURBLKCNT),
+ readl(addrbase + VIA_CRDR_SDINTMASK));
+ pr_debug("SDSTATUS=%08x, SDCLKSEL=%08x, SDEXTCTRL=%08x\n",
+ readl(addrbase + VIA_CRDR_SDSTATUS),
+ readl(addrbase + VIA_CRDR_SDCLKSEL),
+ readl(addrbase + VIA_CRDR_SDEXTCTRL));
+}
+
+static void via_print_pcictrl(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase = host->pcictrl_mmiobase;
+
+ pr_debug("PCI Control Registers:\n");
+ pr_debug("PCICLKGATT=%02x, PCISDCCLK=%02x, PCIDMACLK=%02x\n",
+ readb(addrbase + VIA_CRDR_PCICLKGATT),
+ readb(addrbase + VIA_CRDR_PCISDCCLK),
+ readb(addrbase + VIA_CRDR_PCIDMACLK));
+ pr_debug("PCIINTCTRL=%02x, PCIINTSTATUS=%02x\n",
+ readb(addrbase + VIA_CRDR_PCIINTCTRL),
+ readb(addrbase + VIA_CRDR_PCIINTSTATUS));
+}
+
+static void via_save_pcictrlreg(struct via_crdr_mmc_host *host)
+{
+ struct pcictrlreg *pm_pcictrl_reg;
+ void __iomem *addrbase;
+
+ pm_pcictrl_reg = &(host->pm_pcictrl_reg);
+ addrbase = host->pcictrl_mmiobase;
+
+ pm_pcictrl_reg->pciclkgat_reg = readb(addrbase + VIA_CRDR_PCICLKGATT);
+ pm_pcictrl_reg->pciclkgat_reg |=
+ VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ pm_pcictrl_reg->pcisdclk_reg = readb(addrbase + VIA_CRDR_PCISDCCLK);
+ pm_pcictrl_reg->pcidmaclk_reg = readb(addrbase + VIA_CRDR_PCIDMACLK);
+ pm_pcictrl_reg->pciintctrl_reg = readb(addrbase + VIA_CRDR_PCIINTCTRL);
+ pm_pcictrl_reg->pciintstatus_reg =
+ readb(addrbase + VIA_CRDR_PCIINTSTATUS);
+ pm_pcictrl_reg->pcitmoctrl_reg = readb(addrbase + VIA_CRDR_PCITMOCTRL);
+}
+
+static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host)
+{
+ struct pcictrlreg *pm_pcictrl_reg;
+ void __iomem *addrbase;
+
+ pm_pcictrl_reg = &(host->pm_pcictrl_reg);
+ addrbase = host->pcictrl_mmiobase;
+
+ writeb(pm_pcictrl_reg->pciclkgat_reg, addrbase + VIA_CRDR_PCICLKGATT);
+ writeb(pm_pcictrl_reg->pcisdclk_reg, addrbase + VIA_CRDR_PCISDCCLK);
+ writeb(pm_pcictrl_reg->pcidmaclk_reg, addrbase + VIA_CRDR_PCIDMACLK);
+ writeb(pm_pcictrl_reg->pciintctrl_reg, addrbase + VIA_CRDR_PCIINTCTRL);
+ writeb(pm_pcictrl_reg->pciintstatus_reg,
+ addrbase + VIA_CRDR_PCIINTSTATUS);
+ writeb(pm_pcictrl_reg->pcitmoctrl_reg, addrbase + VIA_CRDR_PCITMOCTRL);
+}
+
+static void via_save_sdcreg(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhc_reg;
+ void __iomem *addrbase;
+
+ pm_sdhc_reg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ pm_sdhc_reg->sdcontrol_reg = readl(addrbase + VIA_CRDR_SDCTRL);
+ pm_sdhc_reg->sdcmdarg_reg = readl(addrbase + VIA_CRDR_SDCARG);
+ pm_sdhc_reg->sdbusmode_reg = readl(addrbase + VIA_CRDR_SDBUSMODE);
+ pm_sdhc_reg->sdblklen_reg = readl(addrbase + VIA_CRDR_SDBLKLEN);
+ pm_sdhc_reg->sdcurblkcnt_reg = readl(addrbase + VIA_CRDR_SDCURBLKCNT);
+ pm_sdhc_reg->sdintmask_reg = readl(addrbase + VIA_CRDR_SDINTMASK);
+ pm_sdhc_reg->sdstatus_reg = readl(addrbase + VIA_CRDR_SDSTATUS);
+ pm_sdhc_reg->sdrsptmo_reg = readl(addrbase + VIA_CRDR_SDRSPTMO);
+ pm_sdhc_reg->sdclksel_reg = readl(addrbase + VIA_CRDR_SDCLKSEL);
+ pm_sdhc_reg->sdextctrl_reg = readl(addrbase + VIA_CRDR_SDEXTCTRL);
+}
+
+static void via_restore_sdcreg(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhc_reg;
+ void __iomem *addrbase;
+
+ pm_sdhc_reg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ writel(pm_sdhc_reg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
+ writel(pm_sdhc_reg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
+ writel(pm_sdhc_reg->sdbusmode_reg, addrbase + VIA_CRDR_SDBUSMODE);
+ writel(pm_sdhc_reg->sdblklen_reg, addrbase + VIA_CRDR_SDBLKLEN);
+ writel(pm_sdhc_reg->sdcurblkcnt_reg, addrbase + VIA_CRDR_SDCURBLKCNT);
+ writel(pm_sdhc_reg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
+ writel(pm_sdhc_reg->sdstatus_reg, addrbase + VIA_CRDR_SDSTATUS);
+ writel(pm_sdhc_reg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
+ writel(pm_sdhc_reg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
+ writel(pm_sdhc_reg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
+}
+
+static void via_pwron_sleep(struct via_crdr_mmc_host *sdhost)
+{
+ if (sdhost->quirks & VIA_CRDR_QUIRK_300MS_PWRDELAY)
+ msleep(300);
+ else
+ msleep(3);
+}
+
+static void via_set_ddma(struct via_crdr_mmc_host *host,
+ dma_addr_t dmaaddr, u32 count, int dir, int enirq)
+{
+ void __iomem *addrbase;
+ u32 ctrl_data = 0;
+
+ if (enirq)
+ ctrl_data |= VIA_CRDR_DMACTRL_ENIRQ;
+
+ if (dir)
+ ctrl_data |= VIA_CRDR_DMACTRL_DIR;
+
+ addrbase = host->ddma_mmiobase;
+
+ writel(dmaaddr, addrbase + VIA_CRDR_DMABASEADD);
+ writel(count, addrbase + VIA_CRDR_DMACOUNTER);
+ writel(ctrl_data, addrbase + VIA_CRDR_DMACTRL);
+ writel(0x01, addrbase + VIA_CRDR_DMASTART);
+
+ /* It seems that our DMA can not work normally with 375kHz clock */
+ /* FIXME: don't brute-force 8MHz but use PIO at 375kHz !! */
+ addrbase = host->pcictrl_mmiobase;
+ if (readb(addrbase + VIA_CRDR_PCISDCCLK) == PCI_CLK_375K) {
+ dev_info(host->mmc->parent, "forcing card speed to 8MHz\n");
+ writeb(PCI_CLK_8M, addrbase + VIA_CRDR_PCISDCCLK);
+ }
+}
+
+static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
+ struct mmc_data *data)
+{
+ void __iomem *addrbase;
+ u32 blk_reg;
+ int count;
+
+ WARN_ON(host->data);
+
+ /* Sanity checks */
+ BUG_ON(data->blksz > host->mmc->max_blk_size);
+ BUG_ON(data->blocks > host->mmc->max_blk_count);
+
+ host->data = data;
+
+ count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_READ) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+ BUG_ON(count != 1);
+
+ via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
+ (data->flags & MMC_DATA_WRITE) ? 1 : 0, 1);
+
+ addrbase = host->sdhc_mmiobase;
+
+ blk_reg = data->blksz - 1;
+ blk_reg |= VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ blk_reg |= (data->blocks) << 16;
+
+ writel(blk_reg, addrbase + VIA_CRDR_SDBLKLEN);
+}
+
+static void via_sdc_get_response(struct via_crdr_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ void __iomem *addrbase = host->sdhc_mmiobase;
+ u32 dwdata0 = readl(addrbase + VIA_CRDR_SDRESP0);
+ u32 dwdata1 = readl(addrbase + VIA_CRDR_SDRESP1);
+ u32 dwdata2 = readl(addrbase + VIA_CRDR_SDRESP2);
+ u32 dwdata3 = readl(addrbase + VIA_CRDR_SDRESP3);
+
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = ((u8) (dwdata1)) |
+ (((u8) (dwdata0 >> 24)) << 8) |
+ (((u8) (dwdata0 >> 16)) << 16) |
+ (((u8) (dwdata0 >> 8)) << 24);
+
+ cmd->resp[1] = ((u8) (dwdata2)) |
+ (((u8) (dwdata1 >> 24)) << 8) |
+ (((u8) (dwdata1 >> 16)) << 16) |
+ (((u8) (dwdata1 >> 8)) << 24);
+
+ cmd->resp[2] = ((u8) (dwdata3)) |
+ (((u8) (dwdata2 >> 24)) << 8) |
+ (((u8) (dwdata2 >> 16)) << 16) |
+ (((u8) (dwdata2 >> 8)) << 24);
+
+ cmd->resp[3] = 0xff |
+ ((((u8) (dwdata3 >> 24))) << 8) |
+ (((u8) (dwdata3 >> 16)) << 16) |
+ (((u8) (dwdata3 >> 8)) << 24);
+ } else {
+ dwdata0 >>= 8;
+ cmd->resp[0] = ((dwdata0 & 0xff) << 24) |
+ (((dwdata0 >> 8) & 0xff) << 16) |
+ (((dwdata0 >> 16) & 0xff) << 8) | (dwdata1 & 0xff);
+
+ dwdata1 >>= 8;
+ cmd->resp[1] = ((dwdata1 & 0xff) << 24) |
+ (((dwdata1 >> 8) & 0xff) << 16) |
+ (((dwdata1 >> 16) & 0xff) << 8);
+ }
+}
+
+static void via_sdc_send_command(struct via_crdr_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ void __iomem *addrbase;
+ struct mmc_data *data;
+ u32 cmdctrl = 0;
+
+ WARN_ON(host->cmd);
+
+ data = cmd->data;
+ mod_timer(&host->timer, jiffies + HZ);
+ host->cmd = cmd;
+
+ /*Command index*/
+ cmdctrl = cmd->opcode << 8;
+
+ /*Response type*/
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_NONE;
+ break;
+ case MMC_RSP_R1:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1;
+ break;
+ case MMC_RSP_R1B:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1B;
+ break;
+ case MMC_RSP_R2:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R3;
+ break;
+ default:
+ pr_err("%s: cmd->flag is not valid\n", mmc_hostname(host->mmc));
+ break;
+ }
+
+ if (!(cmd->data))
+ goto nodata;
+
+ via_sdc_preparedata(host, data);
+
+ /*Command control*/
+ if (data->blocks > 1) {
+ if (data->flags & MMC_DATA_WRITE) {
+ cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
+ cmdctrl |= VIA_CRDR_SDCTRL_MULTI_WR;
+ } else {
+ cmdctrl |= VIA_CRDR_SDCTRL_MULTI_RD;
+ }
+ } else {
+ if (data->flags & MMC_DATA_WRITE) {
+ cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
+ cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_WR;
+ } else {
+ cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_RD;
+ }
+ }
+
+nodata:
+ if (cmd == host->mrq->stop)
+ cmdctrl |= VIA_CRDR_SDCTRL_STOP;
+
+ cmdctrl |= VIA_CRDR_SDCTRL_START;
+
+ addrbase = host->sdhc_mmiobase;
+ writel(cmd->arg, addrbase + VIA_CRDR_SDCARG);
+ writel(cmdctrl, addrbase + VIA_CRDR_SDCTRL);
+}
+
+static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
+{
+ struct mmc_data *data;
+
+ BUG_ON(!host->data);
+
+ data = host->data;
+ host->data = NULL;
+
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_READ) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (data->stop)
+ via_sdc_send_command(host, data->stop);
+ else
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
+{
+ via_sdc_get_response(host, host->cmd);
+
+ host->cmd->error = 0;
+
+ if (!host->cmd->data)
+ tasklet_schedule(&host->finish_tasklet);
+
+ host->cmd = NULL;
+}
+
+static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ void __iomem *addrbase;
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ u16 status;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->pcictrl_mmiobase;
+ writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+
+ WARN_ON(host->mrq != NULL);
+ host->mrq = mrq;
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+ if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ } else {
+ via_sdc_send_command(host, mrq->cmd);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void via_sdc_set_power(struct via_crdr_mmc_host *host,
+ unsigned short power, unsigned int on)
+{
+ unsigned long flags;
+ u8 gatt;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->power = (1 << power);
+
+ gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ if (host->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ if (on)
+ gatt |= VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ else
+ gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_pwron_sleep(host);
+}
+
+static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ void __iomem *addrbase;
+ u32 org_data, sdextctrl;
+ u8 clock;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->sdhc_mmiobase;
+ org_data = readl(addrbase + VIA_CRDR_SDBUSMODE);
+ sdextctrl = readl(addrbase + VIA_CRDR_SDEXTCTRL);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_1)
+ org_data &= ~VIA_CRDR_SDMODE_4BIT;
+ else
+ org_data |= VIA_CRDR_SDMODE_4BIT;
+
+ if (ios->power_mode == MMC_POWER_OFF)
+ org_data &= ~VIA_CRDR_SDMODE_CLK_ON;
+ else
+ org_data |= VIA_CRDR_SDMODE_CLK_ON;
+
+ if (ios->timing == MMC_TIMING_SD_HS)
+ sdextctrl |= VIA_CRDR_SDEXTCTRL_HISPD;
+ else
+ sdextctrl &= ~VIA_CRDR_SDEXTCTRL_HISPD;
+
+ writel(org_data, addrbase + VIA_CRDR_SDBUSMODE);
+ writel(sdextctrl, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ if (ios->clock >= 48000000)
+ clock = PCI_CLK_48M;
+ else if (ios->clock >= 33000000)
+ clock = PCI_CLK_33M;
+ else if (ios->clock >= 24000000)
+ clock = PCI_CLK_24M;
+ else if (ios->clock >= 16000000)
+ clock = PCI_CLK_16M;
+ else if (ios->clock >= 12000000)
+ clock = PCI_CLK_12M;
+ else if (ios->clock >= 8000000)
+ clock = PCI_CLK_8M;
+ else
+ clock = PCI_CLK_375K;
+
+ addrbase = host->pcictrl_mmiobase;
+ if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
+ writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (ios->power_mode != MMC_POWER_OFF)
+ via_sdc_set_power(host, ios->vdd, 1);
+ else
+ via_sdc_set_power(host, ios->vdd, 0);
+}
+
+static int via_sdc_get_ro(struct mmc_host *mmc)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ u16 status;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return !(status & VIA_CRDR_SDSTS_WP);
+}
+
+static const struct mmc_host_ops via_sdc_ops = {
+ .request = via_sdc_request,
+ .set_ios = via_sdc_set_ios,
+ .get_ro = via_sdc_get_ro,
+};
+
+static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase;
+ unsigned long flags;
+ u8 gatt;
+
+ addrbase = host->pcictrl_mmiobase;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ via_save_pcictrlreg(host);
+ via_save_sdcreg(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ if (host->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(host);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ via_restore_pcictrlreg(host);
+ via_restore_sdcreg(host);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
+{
+ BUG_ON(intmask == 0);
+
+ if (!host->cmd) {
+ pr_err("%s: Got command interrupt 0x%x even "
+ "though no command operation was in progress.\n",
+ mmc_hostname(host->mmc), intmask);
+ return;
+ }
+
+ if (intmask & VIA_CRDR_SDSTS_CRTO)
+ host->cmd->error = -ETIMEDOUT;
+ else if (intmask & VIA_CRDR_SDSTS_SC)
+ host->cmd->error = -EILSEQ;
+
+ if (host->cmd->error)
+ tasklet_schedule(&host->finish_tasklet);
+ else if (intmask & VIA_CRDR_SDSTS_CRD)
+ via_sdc_finish_command(host);
+}
+
+static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
+{
+ BUG_ON(intmask == 0);
+
+ if (intmask & VIA_CRDR_SDSTS_DT)
+ host->data->error = -ETIMEDOUT;
+ else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
+ host->data->error = -EILSEQ;
+
+ via_sdc_finish_data(host);
+}
+
+static irqreturn_t via_sdc_isr(int irq, void *dev_id)
+{
+ struct via_crdr_mmc_host *sdhost = dev_id;
+ void __iomem *addrbase;
+ u8 pci_status;
+ u16 sd_status;
+ irqreturn_t result;
+
+ if (!sdhost)
+ return IRQ_NONE;
+
+ spin_lock(&sdhost->lock);
+
+ addrbase = sdhost->pcictrl_mmiobase;
+ pci_status = readb(addrbase + VIA_CRDR_PCIINTSTATUS);
+ if (!(pci_status & VIA_CRDR_PCIINTSTATUS_SDC)) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+ addrbase = sdhost->sdhc_mmiobase;
+ sd_status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ sd_status &= VIA_CRDR_SDSTS_INT_MASK;
+ sd_status &= ~VIA_CRDR_SDSTS_IGN_MASK;
+ if (!sd_status) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+ if (sd_status & VIA_CRDR_SDSTS_CIR) {
+ writew(sd_status & VIA_CRDR_SDSTS_CIR,
+ addrbase + VIA_CRDR_SDSTATUS);
+
+ schedule_work(&sdhost->carddet_work);
+ }
+
+ sd_status &= ~VIA_CRDR_SDSTS_CIR;
+ if (sd_status & VIA_CRDR_SDSTS_CMD_MASK) {
+ writew(sd_status & VIA_CRDR_SDSTS_CMD_MASK,
+ addrbase + VIA_CRDR_SDSTATUS);
+ via_sdc_cmd_isr(sdhost, sd_status & VIA_CRDR_SDSTS_CMD_MASK);
+ }
+ if (sd_status & VIA_CRDR_SDSTS_DATA_MASK) {
+ writew(sd_status & VIA_CRDR_SDSTS_DATA_MASK,
+ addrbase + VIA_CRDR_SDSTATUS);
+ via_sdc_data_isr(sdhost, sd_status & VIA_CRDR_SDSTS_DATA_MASK);
+ }
+
+ sd_status &= ~(VIA_CRDR_SDSTS_CMD_MASK | VIA_CRDR_SDSTS_DATA_MASK);
+ if (sd_status) {
+ pr_err("%s: Unexpected interrupt 0x%x\n",
+ mmc_hostname(sdhost->mmc), sd_status);
+ writew(sd_status, addrbase + VIA_CRDR_SDSTATUS);
+ }
+
+ result = IRQ_HANDLED;
+
+ mmiowb();
+out:
+ spin_unlock(&sdhost->lock);
+
+ return result;
+}
+
+static void via_sdc_timeout(unsigned long ulongdata)
+{
+ struct via_crdr_mmc_host *sdhost;
+ unsigned long flags;
+
+ sdhost = (struct via_crdr_mmc_host *)ulongdata;
+
+ spin_lock_irqsave(&sdhost->lock, flags);
+
+ if (sdhost->mrq) {
+ pr_err("%s: Timeout waiting for hardware interrupt."
+ "cmd:0x%x\n", mmc_hostname(sdhost->mmc),
+ sdhost->mrq->cmd->opcode);
+
+ if (sdhost->data) {
+ writel(VIA_CRDR_DMACTRL_SFTRST,
+ sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
+ sdhost->data->error = -ETIMEDOUT;
+ via_sdc_finish_data(sdhost);
+ } else {
+ if (sdhost->cmd)
+ sdhost->cmd->error = -ETIMEDOUT;
+ else
+ sdhost->mrq->cmd->error = -ETIMEDOUT;
+ tasklet_schedule(&sdhost->finish_tasklet);
+ }
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&sdhost->lock, flags);
+}
+
+static void via_sdc_tasklet_finish(unsigned long param)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ struct mmc_request *mrq;
+
+ host = (struct via_crdr_mmc_host *)param;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ del_timer(&host->timer);
+ mrq = host->mrq;
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void via_sdc_card_detect(struct work_struct *work)
+{
+ struct via_crdr_mmc_host *host;
+ void __iomem *addrbase;
+ unsigned long flags;
+ u16 status;
+
+ host = container_of(work, struct via_crdr_mmc_host, carddet_work);
+
+ addrbase = host->ddma_mmiobase;
+ writel(VIA_CRDR_DMACTRL_SFTRST, addrbase + VIA_CRDR_DMACTRL);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->pcictrl_mmiobase;
+ writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
+
+ addrbase = host->sdhc_mmiobase;
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ if (!(status & VIA_CRDR_SDSTS_SLOTG)) {
+ if (host->mrq) {
+ pr_err("%s: Card removed during transfer!\n",
+ mmc_hostname(host->mmc));
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_reset_pcictrl(host);
+
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_print_pcictrl(host);
+ via_print_sdchc(host);
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+}
+
+static void via_init_mmc_host(struct via_crdr_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ void __iomem *addrbase;
+ u32 lenreg;
+ u32 status;
+
+ init_timer(&host->timer);
+ host->timer.data = (unsigned long)host;
+ host->timer.function = via_sdc_timeout;
+
+ spin_lock_init(&host->lock);
+
+ mmc->f_min = VIA_CRDR_MIN_CLOCK;
+ mmc->f_max = VIA_CRDR_MAX_CLOCK;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
+ mmc->ops = &via_sdc_ops;
+
+ /*Hardware cannot do scatter lists*/
+ mmc->max_hw_segs = 1;
+ mmc->max_phys_segs = 1;
+
+ mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
+ mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;
+
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_seg_size;
+
+ INIT_WORK(&host->carddet_work, via_sdc_card_detect);
+
+ tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish,
+ (unsigned long)host);
+
+ addrbase = host->sdhc_mmiobase;
+ writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
+ msleep(1);
+
+ lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS2);
+ status |= VIA_CRDR_SDSTS_CFE;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS2);
+
+ writeb(0x0, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ writel(VIA_CRDR_SDACTIVE_INTMASK, addrbase + VIA_CRDR_SDINTMASK);
+ msleep(1);
+}
+
+static int __devinit via_sd_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
+{
+ struct mmc_host *mmc;
+ struct via_crdr_mmc_host *sdhost;
+ u32 base, len;
+ u8 rev, gatt;
+ int ret;
+
+ pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
+ pr_info(DRV_NAME
+ ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
+ (int)rev);
+
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pcidev, DRV_NAME);
+ if (ret)
+ goto disable;
+
+ pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0);
+ pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0);
+
+ mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ sdhost = mmc_priv(mmc);
+ sdhost->mmc = mmc;
+ dev_set_drvdata(&pcidev->dev, sdhost);
+
+ len = pci_resource_len(pcidev, 0);
+ base = pci_resource_start(pcidev, 0);
+ sdhost->mmiobase = ioremap_nocache(base, len);
+ if (!sdhost->mmiobase) {
+ ret = -ENOMEM;
+ goto free_mmc_host;
+ }
+
+ sdhost->sdhc_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_SDC_OFF;
+ sdhost->ddma_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_DDMA_OFF;
+ sdhost->pcictrl_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_PCICTRL_OFF;
+
+ sdhost->power = MMC_VDD_165_195;
+
+ gatt = VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(sdhost);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ via_init_mmc_host(sdhost);
+
+ ret =
+ request_irq(pcidev->irq, via_sdc_isr, IRQF_SHARED, DRV_NAME,
+ sdhost);
+ if (ret)
+ goto unmap;
+
+ writeb(VIA_CRDR_PCIINTCTRL_SDCIRQEN,
+ sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
+ writeb(VIA_CRDR_PCITMOCTRL_1024MS,
+ sdhost->pcictrl_mmiobase + VIA_CRDR_PCITMOCTRL);
+
+ /* device-specific quirks */
+ if (pcidev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+ pcidev->subsystem_device == 0x3891)
+ sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
+
+ mmc_add_host(mmc);
+
+ return 0;
+
+unmap:
+ iounmap(sdhost->mmiobase);
+free_mmc_host:
+ dev_set_drvdata(&pcidev->dev, NULL);
+ mmc_free_host(mmc);
+release:
+ pci_release_regions(pcidev);
+disable:
+ pci_disable_device(pcidev);
+
+ return ret;
+}
+
+static void __devexit via_sd_remove(struct pci_dev *pcidev)
+{
+ struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);
+ unsigned long flags;
+ u8 gatt;
+
+ spin_lock_irqsave(&sdhost->lock, flags);
+
+ /* Ensure we don't accept more commands from mmc layer */
+ sdhost->reject = 1;
+
+ /* Disable generating further interrupts */
+ writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
+ mmiowb();
+
+ if (sdhost->mrq) {
+ printk(KERN_ERR "%s: Controller removed during "
+ "transfer\n", mmc_hostname(sdhost->mmc));
+
+ /* make sure all DMA is stopped */
+ writel(VIA_CRDR_DMACTRL_SFTRST,
+ sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
+ mmiowb();
+ sdhost->mrq->cmd->error = -ENOMEDIUM;
+ if (sdhost->mrq->stop)
+ sdhost->mrq->stop->error = -ENOMEDIUM;
+ tasklet_schedule(&sdhost->finish_tasklet);
+ }
+ spin_unlock_irqrestore(&sdhost->lock, flags);
+
+ mmc_remove_host(sdhost->mmc);
+
+ free_irq(pcidev->irq, sdhost);
+
+ del_timer_sync(&sdhost->timer);
+
+ tasklet_kill(&sdhost->finish_tasklet);
+
+ /* switch off power */
+ gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+
+ iounmap(sdhost->mmiobase);
+ dev_set_drvdata(&pcidev->dev, NULL);
+ mmc_free_host(sdhost->mmc);
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+
+ pr_info(DRV_NAME
+ ": VIA SDMMC controller at %s [%04x:%04x] has been removed\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
+}
+
+#ifdef CONFIG_PM
+
+static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhcreg;
+ void __iomem *addrbase;
+ u32 lenreg;
+ u16 status;
+
+ pm_sdhcreg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
+
+ lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS2);
+ status |= VIA_CRDR_SDSTS_CFE;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS2);
+
+ writel(pm_sdhcreg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
+ writel(pm_sdhcreg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
+ writel(pm_sdhcreg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
+ writel(pm_sdhcreg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
+ writel(pm_sdhcreg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
+ writel(pm_sdhcreg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ via_print_pcictrl(host);
+ via_print_sdchc(host);
+}
+
+static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
+{
+ struct via_crdr_mmc_host *host;
+ int ret = 0;
+
+ host = pci_get_drvdata(pcidev);
+
+ via_save_pcictrlreg(host);
+ via_save_sdcreg(host);
+
+ ret = mmc_suspend_host(host->mmc, state);
+
+ pci_save_state(pcidev);
+ pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
+ pci_disable_device(pcidev);
+ pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+
+ return ret;
+}
+
+static int via_sd_resume(struct pci_dev *pcidev)
+{
+ struct via_crdr_mmc_host *sdhost;
+ int ret = 0;
+ u8 gatt;
+
+ sdhost = pci_get_drvdata(pcidev);
+
+ gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ if (sdhost->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(sdhost);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ msleep(100);
+
+ pci_set_power_state(pcidev, PCI_D0);
+ pci_restore_state(pcidev);
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ return ret;
+
+ via_restore_pcictrlreg(sdhost);
+ via_init_sdc_pm(sdhost);
+
+ ret = mmc_resume_host(sdhost->mmc);
+
+ return ret;
+}
+
+#else /* CONFIG_PM */
+
+#define via_sd_suspend NULL
+#define via_sd_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver via_sd_driver = {
+ .name = DRV_NAME,
+ .id_table = via_ids,
+ .probe = via_sd_probe,
+ .remove = __devexit_p(via_sd_remove),
+ .suspend = via_sd_suspend,
+ .resume = via_sd_resume,
+};
+
+static int __init via_sd_drv_init(void)
+{
+ pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver "
+ "(C) 2008 VIA Technologies, Inc.\n");
+
+ return pci_register_driver(&via_sd_driver);
+}
+
+static void __exit via_sd_drv_exit(void)
+{
+ pci_unregister_driver(&via_sd_driver);
+}
+
+module_init(via_sd_drv_init);
+module_exit(via_sd_drv_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("VIA Technologies Inc.");
+MODULE_DESCRIPTION("VIA SD/MMC Card Interface driver");
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c240454fd113..8664feebc93b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -46,6 +46,7 @@
#define MANUFACTURER_INTEL 0x0089
#define I82802AB 0x00ad
#define I82802AC 0x00ac
+#define PF38F4476 0x881c
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -315,10 +316,20 @@ static struct cfi_fixup fixup_table[] = {
{ 0, 0, NULL, NULL }
};
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_intelext *extp)
+{
+ if (cfi->mfr == MANUFACTURER_INTEL &&
+ cfi->id == PF38F4476 && extp->MinorVersion == '3')
+ extp->MinorVersion = '1';
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
+ struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp;
+ unsigned int extra_size = 0;
unsigned int extp_size = sizeof(*extp);
again:
@@ -326,6 +337,8 @@ read_pri_intelext(struct map_info *map, __u16 adr)
if (!extp)
return NULL;
+ cfi_fixup_major_minor(cfi, extp);
+
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
@@ -340,19 +353,24 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
- if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
- unsigned int extra_size = 0;
- int nb_parts, i;
+ if (extp->MinorVersion >= '0') {
+ extra_size = 0;
/* Protection Register info */
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
+ }
+ if (extp->MinorVersion >= '1') {
/* Burst Read info */
extra_size += 2;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
- extra_size += extp->extra[extra_size-1];
+ extra_size += extp->extra[extra_size - 1];
+ }
+
+ if (extp->MinorVersion >= '3') {
+ int nb_parts, i;
/* Number of hardware-partitions */
extra_size += 1;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index e824b9b9b056..ccc4cfc7e4b5 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -166,6 +166,7 @@
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
+#define SST39SF040 0x00B7
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -1393,6 +1394,18 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
.mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39SF040,
+ .name = "SST 39SF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
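For reference, the ERASEINFO(0x01000,128) entry above encodes 128 erase blocks of 0x1000 (4 KiB) each, i.e. 128 * 4 KiB = 512 KiB, which matches the SIZE_512KiB device size declared for the SST 39SF040.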
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 5011fa73f918..1479da6d3aa6 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
- dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
+ dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part,
parts[this_part].name,
parts[this_part].offset,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cc6369ea67dd..ae5fe91867e1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -54,7 +54,7 @@
#define SR_SRWD 0x80 /* SR write protect */
/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define CMD_SIZE 4
#ifdef CONFIG_M25PXX_USE_FAST_READ
@@ -500,6 +500,9 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
{ "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
+ /* Macronix */
+ { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
@@ -528,6 +531,7 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "m25p64", 0x202017, 0, 64 * 1024, 128, },
{ "m25p128", 0x202018, 0, 256 * 1024, 64, },
+ { "m45pe10", 0x204011, 0, 64 * 1024, 2, },
{ "m45pe80", 0x204014, 0, 64 * 1024, 16, },
{ "m45pe16", 0x204015, 0, 64 * 1024, 32, },
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 73f05227dc8c..d8cf29c01cc4 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
if (!desperate && inftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
"EUNs (%d)\n", inftl->numfreeEUNs);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
silly = MAX_LOOPS;
while (thisEUN < inftl->nb_blocks) {
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
- if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
+ if ((BlockMap[block] != BLOCK_NIL) ||
+ BlockDeleted[block])
continue;
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in "
"Virtual Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -549,7 +550,7 @@ hitused:
* waiting to be picked up. We're going to have to fold
* a chain to make room.
*/
- thisEUN = INFTL_makefreeblock(inftl, 0xffff);
+ thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
/*
* Hopefully we free something, lets try again.
@@ -631,7 +632,7 @@ hitused:
printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
"Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/*
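(BLOCK_NIL is the existing "end of chain" sentinel in the NFTL/INFTL headers and expands to 0xffff, so the substitutions above do not change behaviour; they only make the intent readable.)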
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 82923bd2d9c5..0b98654d8eed 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -105,15 +105,6 @@ config MSP_FLASH_MAP_LIMIT
default "0x02000000"
depends on MSP_FLASH_MAP_LIMIT_32M
-config MTD_PMC_MSP_RAMROOT
- tristate "Embedded RAM block device for root on PMC-Sierra MSP"
- depends on PMC_MSP_EMBEDDED_ROOTFS && \
- (MTD_BLOCK || MTD_BLOCK_RO) && \
- MTD_RAM
- help
- This provides support for the embedded root file system
- on PMC MSP devices. This memory is mapped as a MTD block device.
-
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
depends on SPARC && MTD_CFI && PCI
@@ -270,7 +261,7 @@ config MTD_ALCHEMY
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
+ depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -501,7 +492,7 @@ config MTD_BFIN_ASYNC
If compiled as a module, it will be called bfin-async-flash.
config MTD_UCLINUX
- tristate "Generic uClinux RAM/ROM filesystem support"
+ bool "Generic uClinux RAM/ROM filesystem support"
depends on MTD_PARTITIONS && MTD_RAM && !MMU
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 2dbc1bec8488..8bae7f9850c0 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
-obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 576611f605db..365c77b1b871 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -40,6 +40,9 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
};
static void switch_to_flash(struct async_state *state)
@@ -170,6 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
add_mtd_partitions(state->mtd, pdata->parts, ret);
+ state->parts = pdata->parts;
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
@@ -193,6 +197,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
gpio_free(state->enet_flash_pin);
#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(state->mtd);
+ kfree(state->parts);
#endif
map_destroy(state->mtd);
kfree(state);
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index c9681a339a59..2aac41bde8b3 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -36,27 +36,31 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <asm/mach/flash.h>
#include <mach/hardware.h>
#include <asm/system.h>
-#ifdef CONFIG_ARCH_P720T
-#define FLASH_BASE (0x04000000)
-#define FLASH_SIZE (64*1024*1024)
-#endif
+struct armflash_subdev_info {
+ char *name;
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct flash_platform_data *plat;
+};
struct armflash_info {
- struct flash_platform_data *plat;
struct resource *res;
struct mtd_partition *parts;
struct mtd_info *mtd;
- struct map_info map;
+ int nr_subdev;
+ struct armflash_subdev_info subdev[0];
};
static void armflash_set_vpp(struct map_info *map, int on)
{
- struct armflash_info *info = container_of(map, struct armflash_info, map);
+ struct armflash_subdev_info *info =
+ container_of(map, struct armflash_subdev_info, map);
if (info->plat && info->plat->set_vpp)
info->plat->set_vpp(on);
@@ -64,32 +68,17 @@ static void armflash_set_vpp(struct map_info *map, int on)
static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
-static int armflash_probe(struct platform_device *dev)
+static int armflash_subdev_probe(struct armflash_subdev_info *subdev,
+ struct resource *res)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct resource *res = dev->resource;
- unsigned int size = res->end - res->start + 1;
- struct armflash_info *info;
- int err;
+ struct flash_platform_data *plat = subdev->plat;
+ resource_size_t size = res->end - res->start + 1;
void __iomem *base;
+ int err = 0;
- info = kzalloc(sizeof(struct armflash_info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->plat = plat;
- if (plat && plat->init) {
- err = plat->init();
- if (err)
- goto no_resource;
- }
-
- info->res = request_mem_region(res->start, size, "armflash");
- if (!info->res) {
+ if (!request_mem_region(res->start, size, subdev->name)) {
err = -EBUSY;
- goto no_resource;
+ goto out;
}
base = ioremap(res->start, size);
@@ -101,27 +90,140 @@ static int armflash_probe(struct platform_device *dev)
/*
* look for CFI based flash parts fitted to this board
*/
- info->map.size = size;
- info->map.bankwidth = plat->width;
- info->map.phys = res->start;
- info->map.virt = base;
- info->map.name = dev_name(&dev->dev);
- info->map.set_vpp = armflash_set_vpp;
+ subdev->map.size = size;
+ subdev->map.bankwidth = plat->width;
+ subdev->map.phys = res->start;
+ subdev->map.virt = base;
+ subdev->map.name = subdev->name;
+ subdev->map.set_vpp = armflash_set_vpp;
- simple_map_init(&info->map);
+ simple_map_init(&subdev->map);
/*
* Also, the CFI layer automatically works out what size
* of chips we have, and does the necessary identification
* for us automatically.
*/
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
+ subdev->mtd = do_map_probe(plat->map_name, &subdev->map);
+ if (!subdev->mtd) {
err = -ENXIO;
goto no_device;
}
- info->mtd->owner = THIS_MODULE;
+ subdev->mtd->owner = THIS_MODULE;
+
+ /* Successful? */
+ if (err == 0)
+ return err;
+
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ no_device:
+ iounmap(base);
+ no_mem:
+ release_mem_region(res->start, size);
+ out:
+ return err;
+}
+
+static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ kfree(subdev->name);
+ subdev->name = NULL;
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
+
+static int armflash_probe(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ unsigned int size;
+ struct armflash_info *info;
+ int i, nr, err;
+
+ /* Count the number of devices */
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(dev, IORESOURCE_MEM, nr))
+ break;
+ if (nr == 0) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ size = sizeof(struct armflash_info) +
+ sizeof(struct armflash_subdev_info) * nr;
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (plat && plat->init) {
+ err = plat->init();
+ if (err)
+ goto no_resource;
+ }
+
+ for (i = 0; i < nr; i++) {
+ struct armflash_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ if (nr == 1)
+ /* No MTD concatenation, just use the default name */
+ subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL);
+ else
+ subdev->name = kasprintf(GFP_KERNEL, "%s-%d",
+ dev_name(&dev->dev), i);
+ if (!subdev->name) {
+ err = -ENOMEM;
+ break;
+ }
+ subdev->plat = plat;
+
+ err = armflash_subdev_probe(subdev, res);
+ if (err) {
+ kfree(subdev->name);
+ subdev->name = NULL;
+ break;
+ }
+ }
+ info->nr_subdev = i;
+
+ if (err)
+ goto subdev_err;
+
+ if (info->nr_subdev == 1)
+ info->mtd = info->subdev[0].mtd;
+ else if (info->nr_subdev > 1) {
+#ifdef CONFIG_MTD_CONCAT
+ struct mtd_info *cdev[info->nr_subdev];
+
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->nr_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->nr_subdev,
+ dev_name(&dev->dev));
+ if (info->mtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "armflash: multiple devices found but "
+ "MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
+ }
+
+ if (err < 0)
+ goto cleanup;
err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
if (err > 0) {
@@ -131,28 +233,30 @@ static int armflash_probe(struct platform_device *dev)
"mtd partition registration failed: %d\n", err);
}
- if (err == 0)
+ if (err == 0) {
platform_set_drvdata(dev, info);
+ return err;
+ }
/*
- * If we got an error, free all resources.
+ * We got an error, free all resources.
*/
- if (err < 0) {
- if (info->mtd) {
- del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
- }
- kfree(info->parts);
-
- no_device:
- iounmap(base);
- no_mem:
- release_mem_region(res->start, size);
- no_resource:
- if (plat && plat->exit)
- plat->exit();
- kfree(info);
+ cleanup:
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
+ kfree(info->parts);
+ subdev_err:
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
+ no_resource:
+ if (plat && plat->exit)
+ plat->exit();
+ kfree(info);
out:
return err;
}
@@ -160,22 +264,26 @@ static int armflash_probe(struct platform_device *dev)
static int armflash_remove(struct platform_device *dev)
{
struct armflash_info *info = platform_get_drvdata(dev);
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ int i;
platform_set_drvdata(dev, NULL);
if (info) {
if (info->mtd) {
del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
kfree(info->parts);
- iounmap(info->map.virt);
- release_resource(info->res);
- kfree(info->res);
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
- if (info->plat && info->plat->exit)
- info->plat->exit();
+ if (plat && plat->exit)
+ plat->exit();
kfree(info);
}
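The per-subdevice bookkeeping above relies on the usual trailing-array idiom: the variable part (subdev[0]) sits at the end of struct armflash_info and the whole object is allocated in one kzalloc() sized for the number of resources found. A minimal sketch of the pattern, with made-up names:

	/* Sketch of the allocation idiom used above (names are invented). */
	#include <stdlib.h>

	struct sub { int id; };

	struct container {
		int	   nr_sub;
		struct sub sub[];	/* flexible array member */
	};

	static struct container *container_alloc(int nr)
	{
		struct container *c;

		c = calloc(1, sizeof(*c) + sizeof(struct sub) * nr);
		if (c)
			c->nr_sub = nr;
		return c;
	}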
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 29a901157352..380648e9051a 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -195,42 +195,6 @@ err_out:
}
#ifdef CONFIG_PM
-static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend) {
- ret = info->mtd[i]->suspend(info->mtd[i]);
- if (ret)
- goto fail;
- }
-
- return 0;
-fail:
- for (--i; i >= 0; --i)
- if (info->mtd[i]->suspend) {
- BUG_ON(!info->mtd[i]->resume);
- info->mtd[i]->resume(info->mtd[i]);
- }
-
- return ret;
-}
-
-static int physmap_flash_resume(struct platform_device *dev)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->resume)
- info->mtd[i]->resume(info->mtd[i]);
-
- return 0;
-}
-
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
@@ -242,16 +206,12 @@ static void physmap_flash_shutdown(struct platform_device *dev)
info->mtd[i]->resume(info->mtd[i]);
}
#else
-#define physmap_flash_suspend NULL
-#define physmap_flash_resume NULL
#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
- .suspend = physmap_flash_suspend,
- .resume = physmap_flash_resume,
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c83a60fada53..39d357b2eb47 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -20,16 +20,23 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+struct of_flash_list {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+};
+
struct of_flash {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
+ struct mtd_info *cmtd;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
#endif
+ int list_size; /* number of elements in of_flash_list */
+ struct of_flash_list list[0];
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -88,30 +95,44 @@ static int parse_obsolete_partitions(struct of_device *dev,
static int of_flash_remove(struct of_device *dev)
{
struct of_flash *info;
+ int i;
info = dev_get_drvdata(&dev->dev);
if (!info)
return 0;
dev_set_drvdata(&dev->dev, NULL);
- if (info->mtd) {
+#ifdef CONFIG_MTD_CONCAT
+ if (info->cmtd != info->list[0].mtd) {
+ del_mtd_device(info->cmtd);
+ mtd_concat_destroy(info->cmtd);
+ }
+#endif
+
+ if (info->cmtd) {
if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->mtd);
+ del_mtd_partitions(info->cmtd);
kfree(OF_FLASH_PARTS(info));
} else {
- del_mtd_device(info->mtd);
+ del_mtd_device(info->cmtd);
}
- map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
+ for (i = 0; i < info->list_size; i++) {
+ if (info->list[i].mtd)
+ map_destroy(info->list[i].mtd);
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
+ if (info->list[i].map.virt)
+ iounmap(info->list[i].map.virt);
+
+ if (info->list[i].res) {
+ release_resource(info->list[i].res);
+ kfree(info->list[i].res);
+ }
}
+ kfree(info);
+
return 0;
}
@@ -164,68 +185,130 @@ static int __devinit of_flash_probe(struct of_device *dev,
const char *probe_type = match->data;
const u32 *width;
int err;
-
- err = -ENXIO;
- if (of_address_to_resource(dp, 0, &res)) {
- dev_err(&dev->dev, "Can't get IO address from device tree\n");
+ int i;
+ int count;
+ const u32 *p;
+ int reg_tuple_size;
+ struct mtd_info **mtd_list = NULL;
+
+ reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+
+ /*
+ * Get the number of "reg" tuples. Scan for MTD devices in the areas
+ * described by each "reg" region. This (together with the concat
+ * support) makes it possible to support the Intel P30 48F4400 chips,
+ * which consist internally of 2 non-identical NOR chips on one die.
+ */
+ p = of_get_property(dp, "reg", &count);
+ if (count % reg_tuple_size != 0) {
+ dev_err(&dev->dev, "Malformed reg property on %s\n",
+ dev->node->full_name);
+ err = -EINVAL;
goto err_out;
}
-
- dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
- (unsigned long long)res.start, (unsigned long long)res.end);
+ count /= reg_tuple_size;
err = -ENOMEM;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ if (!info)
+ goto err_out;
+
+ mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
+ if (!mtd_list)
goto err_out;
dev_set_drvdata(&dev->dev, info);
- err = -EBUSY;
- info->res = request_mem_region(res.start, res.end - res.start + 1,
- dev_name(&dev->dev));
- if (!info->res)
- goto err_out;
+ for (i = 0; i < count; i++) {
+ err = -ENXIO;
+ if (of_address_to_resource(dp, i, &res)) {
+ dev_err(&dev->dev, "Can't get IO address from device"
+ " tree\n");
+ goto err_out;
+ }
- err = -ENXIO;
- width = of_get_property(dp, "bank-width", NULL);
- if (!width) {
- dev_err(&dev->dev, "Can't get bank width from device tree\n");
- goto err_out;
- }
+ dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
+ (unsigned long long)res.start,
+ (unsigned long long)res.end);
+
+ err = -EBUSY;
+ info->list[i].res = request_mem_region(res.start, res.end -
+ res.start + 1,
+ dev_name(&dev->dev));
+ if (!info->list[i].res)
+ goto err_out;
+
+ err = -ENXIO;
+ width = of_get_property(dp, "bank-width", NULL);
+ if (!width) {
+ dev_err(&dev->dev, "Can't get bank width from device"
+ " tree\n");
+ goto err_out;
+ }
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res.start;
- info->map.size = res.end - res.start + 1;
- info->map.bankwidth = *width;
+ info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.phys = res.start;
+ info->list[i].map.size = res.end - res.start + 1;
+ info->list[i].map.bankwidth = *width;
+
+ err = -ENOMEM;
+ info->list[i].map.virt = ioremap(info->list[i].map.phys,
+ info->list[i].map.size);
+ if (!info->list[i].map.virt) {
+ dev_err(&dev->dev, "Failed to ioremap() flash"
+ " region\n");
+ goto err_out;
+ }
- err = -ENOMEM;
- info->map.virt = ioremap(info->map.phys, info->map.size);
- if (!info->map.virt) {
- dev_err(&dev->dev, "Failed to ioremap() flash region\n");
- goto err_out;
- }
+ simple_map_init(&info->list[i].map);
- simple_map_init(&info->map);
+ if (probe_type) {
+ info->list[i].mtd = do_map_probe(probe_type,
+ &info->list[i].map);
+ } else {
+ info->list[i].mtd = obsolete_probe(dev,
+ &info->list[i].map);
+ }
+ mtd_list[i] = info->list[i].mtd;
- if (probe_type)
- info->mtd = do_map_probe(probe_type, &info->map);
- else
- info->mtd = obsolete_probe(dev, &info->map);
+ err = -ENXIO;
+ if (!info->list[i].mtd) {
+ dev_err(&dev->dev, "do_map_probe() failed\n");
+ goto err_out;
+ } else {
+ info->list_size++;
+ }
+ info->list[i].mtd->owner = THIS_MODULE;
+ info->list[i].mtd->dev.parent = &dev->dev;
+ }
- err = -ENXIO;
- if (!info->mtd) {
- dev_err(&dev->dev, "do_map_probe() failed\n");
- goto err_out;
+ err = 0;
+ if (info->list_size == 1) {
+ info->cmtd = info->list[0].mtd;
+ } else if (info->list_size > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ info->cmtd = mtd_concat_create(mtd_list, info->list_size,
+ dev_name(&dev->dev));
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "physmap_of: multiple devices "
+ "found but MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
}
- info->mtd->owner = THIS_MODULE;
- info->mtd->dev.parent = &dev->dev;
+ if (err)
+ goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- err = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
+ err = parse_mtd_partitions(info->cmtd, part_probe_types,
+ &info->parts, 0);
if (err < 0)
return err;
@@ -244,15 +327,19 @@ static int __devinit of_flash_probe(struct of_device *dev,
}
if (err > 0)
- add_mtd_partitions(info->mtd, info->parts, err);
+ add_mtd_partitions(info->cmtd, info->parts, err);
else
#endif
- add_mtd_device(info->mtd);
+ add_mtd_device(info->cmtd);
+
+ kfree(mtd_list);
return 0;
err_out:
+ kfree(mtd_list);
of_flash_remove(dev);
+
return err;
}
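To make the "reg" tuple arithmetic above concrete, here is a worked example with invented device-tree values:

	/*
	 * Hypothetical example: with #address-cells = <1> and #size-cells = <1>,
	 * a flash node carrying
	 *
	 *	reg = <0xff000000 0x00800000  0xff800000 0x00800000>;
	 *
	 * exposes a 32-byte "reg" property.  reg_tuple_size is
	 * (1 + 1) * sizeof(u32) = 8, so count = 32 / 8 = 2, and two map
	 * regions are probed and (with CONFIG_MTD_CONCAT) concatenated
	 * into a single MTD.
	 */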
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
deleted file mode 100644
index 30de5c0c09a9..000000000000
--- a/drivers/mtd/maps/pmcmsp-ramroot.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Mapping of the rootfs in a physical region of memory
- *
- * Copyright (C) 2005-2007 PMC-Sierra Inc.
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/root_dev.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-#include <asm/io.h>
-
-#include <msp_prom.h>
-
-static struct mtd_info *rr_mtd;
-
-struct map_info rr_map = {
- .name = "ramroot",
- .bankwidth = 4,
-};
-
-static int __init init_rrmap(void)
-{
- void *ramroot_start;
- unsigned long ramroot_size;
-
- /* Check for supported rootfs types */
- if (get_ramroot(&ramroot_start, &ramroot_size)) {
- rr_map.phys = CPHYSADDR(ramroot_start);
- rr_map.size = ramroot_size;
-
- printk(KERN_NOTICE
- "PMC embedded root device: 0x%08lx @ 0x%08lx\n",
- rr_map.size, (unsigned long)rr_map.phys);
- } else {
- printk(KERN_ERR
- "init_rrmap: no supported embedded rootfs detected!\n");
- return -ENXIO;
- }
-
- /* Map rootfs to I/O space for block device driver */
- rr_map.virt = ioremap(rr_map.phys, rr_map.size);
- if (!rr_map.virt) {
- printk(KERN_ERR "Failed to ioremap\n");
- return -EIO;
- }
-
- simple_map_init(&rr_map);
-
- rr_mtd = do_map_probe("map_ram", &rr_map);
- if (rr_mtd) {
- rr_mtd->owner = THIS_MODULE;
-
- add_mtd_device(rr_mtd);
-
- return 0;
- }
-
- iounmap(rr_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rrmap(void)
-{
- del_mtd_device(rr_mtd);
- map_destroy(rr_mtd);
-
- iounmap(rr_map.virt);
- rr_map.virt = NULL;
-}
-
-MODULE_AUTHOR("PMC-Sierra, Inc");
-MODULE_DESCRIPTION("MTD map driver for embedded PMC-Sierra MSP filesystem");
-MODULE_LICENSE("GPL");
-
-module_init(init_rrmap);
-module_exit(cleanup_rrmap);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 572d32fdf38a..643aa06b599e 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -140,24 +140,6 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
}
#ifdef CONFIG_PM
-static int pxa2xx_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info->mtd && info->mtd->suspend)
- ret = info->mtd->suspend(info->mtd);
- return ret;
-}
-
-static int pxa2xx_flash_resume(struct platform_device *dev)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd && info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -166,8 +148,6 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define pxa2xx_flash_suspend NULL
-#define pxa2xx_flash_resume NULL
#define pxa2xx_flash_shutdown NULL
#endif
@@ -178,8 +158,6 @@ static struct platform_driver pxa2xx_flash_driver = {
},
.probe = pxa2xx_flash_probe,
.remove = __devexit_p(pxa2xx_flash_remove),
- .suspend = pxa2xx_flash_suspend,
- .resume = pxa2xx_flash_resume,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index d39f0adac846..83ed64512c5e 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -145,25 +145,6 @@ err_out:
}
#ifdef CONFIG_PM
-static int rbtx4939_flash_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->suspend)
- return info->mtd->suspend(info->mtd);
- return 0;
-}
-
-static int rbtx4939_flash_resume(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
@@ -173,16 +154,12 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define rbtx4939_flash_suspend NULL
-#define rbtx4939_flash_resume NULL
#define rbtx4939_flash_shutdown NULL
#endif
static struct platform_driver rbtx4939_flash_driver = {
.probe = rbtx4939_flash_probe,
.remove = rbtx4939_flash_remove,
- .suspend = rbtx4939_flash_suspend,
- .resume = rbtx4939_flash_resume,
.shutdown = rbtx4939_flash_shutdown,
.driver = {
.name = "rbtx4939-flash",
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 05e9362dc7f0..c6210f5118d1 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -415,25 +415,6 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int sa1100_mtd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info)
- ret = info->mtd->suspend(info->mtd);
-
- return ret;
-}
-
-static int sa1100_mtd_resume(struct platform_device *dev)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- if (info)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
@@ -441,16 +422,12 @@ static void sa1100_mtd_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define sa1100_mtd_suspend NULL
-#define sa1100_mtd_resume NULL
#define sa1100_mtd_shutdown NULL
#endif
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = __exit_p(sa1100_mtd_remove),
- .suspend = sa1100_mtd_suspend,
- .resume = sa1100_mtd_resume,
.shutdown = sa1100_mtd_shutdown,
.driver = {
.name = "sa1100-mtd",
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 81756e397711..d4314fb88212 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -22,15 +22,19 @@
/****************************************************************************/
+extern char _ebss;
+
struct map_info uclinux_ram_map = {
.name = "RAM",
+ .phys = (unsigned long)&_ebss,
+ .size = 0,
};
-struct mtd_info *uclinux_ram_mtdinfo;
+static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-struct mtd_partition uclinux_romfs[] = {
+static struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
@@ -38,7 +42,7 @@ struct mtd_partition uclinux_romfs[] = {
/****************************************************************************/
-int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
+static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
@@ -55,12 +59,10 @@ static int __init uclinux_mtd_init(void)
{
struct mtd_info *mtd;
struct map_info *mapp;
- extern char _ebss;
- unsigned long addr = (unsigned long) &_ebss;
mapp = &uclinux_ram_map;
- mapp->phys = addr;
- mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8))));
+ if (!mapp->size)
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index aaac3b6800b7..c3f62654b6df 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -291,7 +291,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
- gd->driverfs_dev = new->mtd->dev.parent;
+ gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
set_disk_ro(gd, 1);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 763d3f0a1f42..5b081cb84351 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
+#include <linux/compat.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
@@ -355,6 +356,100 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
+static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ uint32_t retlen;
+ int ret = 0;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->write_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
+
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(ops.oobbuf, ptr, length)) {
+ kfree(ops.oobbuf);
+ return -EFAULT;
+ }
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->write_oob(mtd, start, &ops);
+
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(retp, &retlen, sizeof(length)))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
+static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
+ uint32_t length, void __user *ptr, uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->read_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_WRITE, ptr,
+ length) ? 0 : -EFAULT;
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->read_oob(mtd, start, &ops);
+
+ if (put_user(ops.oobretlen, retp))
+ ret = -EFAULT;
+ else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
+ ops.oobretlen))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
static int mtd_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
@@ -417,6 +512,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
break;
case MEMERASE:
+ case MEMERASE64:
{
struct erase_info *erase;
@@ -427,20 +523,32 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
if (!erase)
ret = -ENOMEM;
else {
- struct erase_info_user einfo;
-
wait_queue_head_t waitq;
DECLARE_WAITQUEUE(wait, current);
init_waitqueue_head(&waitq);
- if (copy_from_user(&einfo, argp,
- sizeof(struct erase_info_user))) {
- kfree(erase);
- return -EFAULT;
+ if (cmd == MEMERASE64) {
+ struct erase_info_user64 einfo64;
+
+ if (copy_from_user(&einfo64, argp,
+ sizeof(struct erase_info_user64))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo64.start;
+ erase->len = einfo64.length;
+ } else {
+ struct erase_info_user einfo32;
+
+ if (copy_from_user(&einfo32, argp,
+ sizeof(struct erase_info_user))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo32.start;
+ erase->len = einfo32.length;
}
- erase->addr = einfo.start;
- erase->len = einfo.length;
erase->mtd = mtd;
erase->callback = mtdchar_erase_callback;
erase->priv = (unsigned long)&waitq;
@@ -474,100 +582,56 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
case MEMWRITEOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
- struct mtd_oob_buf __user *user_buf = argp;
- uint32_t retlen;
-
- if(!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
-
- if (!mtd->write_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_READ, buf.ptr,
- buf.length) ? 0 : EFAULT;
-
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
-
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
-
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
-
- if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
- kfree(ops.oobbuf);
- return -EFAULT;
- }
+ struct mtd_oob_buf __user *buf_user = argp;
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->write_oob(mtd, buf.start, &ops);
-
- if (ops.oobretlen > 0xFFFFFFFFU)
- ret = -EOVERFLOW;
- retlen = ops.oobretlen;
- if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
+ /* NOTE: writes return length to buf_user->length */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->length);
break;
-
}
case MEMREADOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
+ struct mtd_oob_buf __user *buf_user = argp;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
else
- ret = access_ok(VERIFY_WRITE, buf.ptr,
- buf.length) ? 0 : -EFAULT;
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->start);
+ break;
+ }
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
+ case MEMWRITEOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->read_oob(mtd, buf.start, &ops);
+ case MEMREADOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- if (put_user(ops.oobretlen, (uint32_t __user *)argp))
- ret = -EFAULT;
- else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
- ops.oobretlen))
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
break;
}
@@ -758,6 +822,68 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
return ret;
} /* memory_ioctl */
+#ifdef CONFIG_COMPAT
+
+struct mtd_oob_buf32 {
+ u_int32_t start;
+ u_int32_t length;
+ compat_caddr_t ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
+
+static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ void __user *argp = compat_ptr(arg);
+ int ret = 0;
+
+ lock_kernel();
+
+ switch (cmd) {
+ case MEMWRITEOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_readoob(mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->start);
+ break;
+ }
+ default:
+ ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+ }
+
+ unlock_kernel();
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
/*
* try to determine where a shared mapping can be made
* - only supported for NOMMU at the moment (MMU can't doesn't copy private
@@ -817,6 +943,9 @@ static const struct file_operations mtd_fops = {
.read = mtd_read,
.write = mtd_write,
.ioctl = mtd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtd_compat_ioctl,
+#endif
.open = mtd_open,
.release = mtd_close,
.mmap = mtd_mmap,
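As a usage illustration (not part of the patch), a user-space tool could drive the new 64-bit erase ioctl roughly as follows; the device path is invented, and MEMERASE64 plus struct erase_info_user64 come from the mtd-abi.h update that accompanies this change:

	/* Hedged user-space sketch: erase one region via the 64-bit ioctl. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <mtd/mtd-user.h>	/* MEMERASE64, struct erase_info_user64 */

	static int erase64(const char *dev, uint64_t start, uint64_t length)
	{
		struct erase_info_user64 ei = { .start = start, .length = length };
		int fd = open(dev, O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, MEMERASE64, &ei) < 0) {
			perror("MEMERASE64");
			close(fd);
			return -1;
		}
		return close(fd);
	}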
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bccb4b1ffc46..fac54a3fa3f1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -23,8 +23,15 @@
#include "mtdcore.h"
-
-static struct class *mtd_class;
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
+
+static struct class mtd_class = {
+ .name = "mtd",
+ .owner = THIS_MODULE,
+ .suspend = mtd_cls_suspend,
+ .resume = mtd_cls_resume,
+};
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -52,7 +59,26 @@ static void mtd_release(struct device *dev)
/* remove /dev/mtdXro node if needed */
if (index)
- device_destroy(mtd_class, index + 1);
+ device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->suspend)
+ return mtd->suspend(mtd);
+ else
+ return 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->resume)
+ mtd->resume(mtd);
+ return 0;
}
static ssize_t mtd_type_show(struct device *dev,
@@ -269,7 +295,7 @@ int add_mtd_device(struct mtd_info *mtd)
* physical device.
*/
mtd->dev.type = &mtd_devtype;
- mtd->dev.class = mtd_class;
+ mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
if (device_register(&mtd->dev) != 0) {
@@ -278,7 +304,7 @@ int add_mtd_device(struct mtd_info *mtd)
}
if (MTD_DEVT(i))
- device_create(mtd_class, mtd->dev.parent,
+ device_create(&mtd_class, mtd->dev.parent,
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
@@ -604,11 +630,12 @@ done:
static int __init init_mtd(void)
{
- mtd_class = class_create(THIS_MODULE, "mtd");
+ int ret;
+ ret = class_register(&mtd_class);
- if (IS_ERR(mtd_class)) {
- pr_err("Error creating mtd class.\n");
- return PTR_ERR(mtd_class);
+ if (ret) {
+ pr_err("Error registering mtd class: %d\n", ret);
+ return ret;
}
#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
@@ -623,7 +650,7 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
- class_destroy(mtd_class);
+ class_unregister(&mtd_class);
}
module_init(init_mtd);
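Moving suspend/resume into the mtd class is what lets the earlier hunks in this series drop the per-map-driver suspend/resume methods (physmap, pxa2xx, rbtx4939, sa1100): the driver core now reaches mtd->suspend()/mtd->resume() through the class for every registered MTD device.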
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 29675edb44b4..349fcbe5cc0f 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -27,9 +27,7 @@ struct mtd_part {
struct mtd_info mtd;
struct mtd_info *master;
uint64_t offset;
- int index;
struct list_head list;
- int registered;
};
/*
@@ -321,8 +319,7 @@ int del_mtd_partitions(struct mtd_info *master)
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
list_del(&slave->list);
- if (slave->registered)
- del_mtd_device(&slave->mtd);
+ del_mtd_device(&slave->mtd);
kfree(slave);
}
@@ -395,7 +392,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
if (master->sync)
slave->mtd.sync = part_sync;
- if (!partno && master->suspend && master->resume) {
+ if (!partno && !master->dev.class && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
}
@@ -412,7 +409,6 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.erase = part_erase;
slave->master = master;
slave->offset = part->offset;
- slave->index = partno;
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
@@ -500,15 +496,9 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
}
out_register:
- if (part->mtdp) {
- /* store the object pointer (caller may or may not register it*/
- *part->mtdp = &slave->mtd;
- slave->registered = 0;
- } else {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
- }
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+
return slave;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f3276897859e..ce96c091f01b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,12 @@ config MTD_NAND_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
+config MTD_NAND_OMAP2
+ tristate "NAND Flash device on OMAP2 and OMAP3"
+ depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3)
+ help
+ Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX
@@ -139,27 +145,27 @@ config MTD_NAND_PPCHAMELEONEVB
This enables the NAND flash driver on the PPChameleon EVB Board.
config MTD_NAND_S3C2410
- tristate "NAND Flash support for S3C2410/S3C2440 SoC"
- depends on ARCH_S3C2410
+ tristate "NAND Flash support for Samsung S3C SoCs"
+ depends on ARCH_S3C2410 || ARCH_S3C64XX
help
- This enables the NAND flash controller on the S3C2410 and S3C2440
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
- bool "S3C2410 NAND driver debug"
+ bool "Samsung S3C NAND driver debug"
depends on MTD_NAND_S3C2410
help
- Enable debugging of the S3C2410 NAND driver
+ Enable debugging of the S3C NAND driver
config MTD_NAND_S3C2410_HWECC
- bool "S3C2410 NAND Hardware ECC"
+ bool "Samsung S3C NAND Hardware ECC"
depends on MTD_NAND_S3C2410
help
- Enable the use of the S3C2410's internal ECC generator when
- using NAND. Early versions of the chip have had problems with
+ Enable the use of the controller's internal ECC generator when
+ using NAND. Early versions of the chips have had problems with
incorrect ECC generation, and if using these, the default of
software ECC is preferable.
@@ -171,7 +177,7 @@ config MTD_NAND_NDFC
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
config MTD_NAND_S3C2410_CLKSTOP
- bool "S3C2410 NAND IDLE clock stop"
+ bool "Samsung S3C NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
default n
help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d33860ac42c3..f3a786b3cff3 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 47a33cec3793..20c828ba9405 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -47,6 +48,9 @@
#define no_ecc 0
#endif
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
/* Register access macros */
#define ecc_readl(add, reg) \
__raw_readl(add + ATMEL_ECC_##reg)
@@ -459,12 +463,17 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (host->board->det_pin) {
if (gpio_get_value(host->board->det_pin)) {
- printk("No SmartMedia card inserted.\n");
+ printk(KERN_INFO "No SmartMedia card inserted.\n");
res = ENXIO;
goto err_no_card;
}
}
+ if (on_flash_bbt) {
+ printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ nand_chip->options |= NAND_USE_FLASH_BBT;
+ }
+
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
res = -ENXIO;
@@ -525,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
&num_partitions);
if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n");
+ printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
res = ENXIO;
goto err_no_partitions;
}
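As a usage note, module_param(on_flash_bbt, int, 0) makes the flag settable only at load time, e.g. "modprobe atmel_nand on_flash_bbt=1", or "atmel_nand.on_flash_bbt=1" on the kernel command line when the driver is built in.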
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4c2a67ca801e..8506e7e606fd 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -458,7 +458,7 @@ static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
uint8_t *buf, int is_read)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
@@ -496,11 +496,20 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* setup DMA register with Blackfin DMA API */
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+/* The DMAs have different size on BF52x and BF54x */
+#ifdef CONFIG_BF52x
+ set_dma_x_count(CH_NFC, (page_size >> 1));
+ set_dma_x_modify(CH_NFC, 2);
+ val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
set_dma_x_count(CH_NFC, (page_size >> 2));
set_dma_x_modify(CH_NFC, 4);
-
- /* setup write or read operation */
val = DI_EN | WDSIZE_32;
+#endif
+ /* setup write or read operation */
if (is_read)
val |= WNR;
set_dma_config(CH_NFC, val);
@@ -512,8 +521,6 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
else
bfin_write_NFC_PGCTL(0x2);
wait_for_completion(&info->dma_completion);
-
- return 0;
}
static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 02700f769b8a..0fad6487e6f4 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -44,7 +44,7 @@
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
- * The 1-bit ECC hardware is supported, but not yet the newer 4-bit ECC
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
@@ -54,11 +54,14 @@
struct davinci_nand_info {
struct mtd_info mtd;
struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
bool partitioned;
+ bool is_readmode;
+
void __iomem *base;
void __iomem *vaddr;
@@ -73,6 +76,7 @@ struct davinci_nand_info {
};
static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
@@ -218,6 +222,192 @@ static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
/*----------------------------------------------------------------------*/
/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ unsigned num_errors, corrected;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ return 0;
+ case 1: /* five or more errors detected */
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
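
The packing scheme above is easy to verify in isolation. Below is a small host-side sketch (illustrative only, not part of the patch) that packs eight 10-bit values with the same shifts as nand_davinci_calculate_4bit() and unpacks them the way nand_davinci_correct_4bit() does; the driver's little-endian type punning is replaced by explicit byte assembly so the check runs on any host.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack four 10-bit values (bits 9:0 and 25:16 of each 32-bit word)
     * into five bytes, using the same shifts as the driver code above. */
    static void pack4(const uint32_t *p, uint8_t *out)
    {
        out[0] =   p[0]        & 0xff;
        out[1] = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
        out[2] = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
        out[3] = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
        out[4] =  (p[1] >> 18) & 0xff;
    }

    /* Unpack ten bytes back into eight 10-bit values, mirroring the
     * shifts in nand_davinci_correct_4bit(); the 16-bit words are
     * assembled explicitly instead of type punning. */
    static void unpack10(const uint8_t *ecc, uint16_t *ecc10)
    {
        uint16_t e[5];
        int i;

        for (i = 0; i < 5; i++)
            e[i] = ecc[2 * i] | ((uint16_t)ecc[2 * i + 1] << 8);

        ecc10[0] =  (e[0] >>  0) & 0x3ff;
        ecc10[1] = ((e[0] >> 10) & 0x3f) | ((e[1] << 6) & 0x3c0);
        ecc10[2] =  (e[1] >>  4) & 0x3ff;
        ecc10[3] = ((e[1] >> 14) & 0x3)  | ((e[2] << 2) & 0x3fc);
        ecc10[4] =  (e[2] >>  8)         | ((e[3] << 8) & 0x300);
        ecc10[5] =  (e[3] >>  2) & 0x3ff;
        ecc10[6] = ((e[3] >> 12) & 0xf)  | ((e[4] << 4) & 0x3f0);
        ecc10[7] =  (e[4] >>  6) & 0x3ff;
    }

    int main(void)
    {
        const uint16_t in[8] = {
            0x3ff, 0x155, 0x2aa, 0x001, 0x200, 0x0f0, 0x10f, 0x3c3
        };
        uint32_t raw[4];
        uint8_t packed[10];
        uint16_t out[8];
        int i;

        /* two 10-bit values per 32-bit word, as read from the ECC registers */
        for (i = 0; i < 4; i++)
            raw[i] = in[2 * i] | ((uint32_t)in[2 * i + 1] << 16);

        pack4(&raw[0], &packed[0]);
        pack4(&raw[2], &packed[5]);
        unpack10(packed, out);

        for (i = 0; i < 8; i++)
            assert(out[i] == in[i]);
        printf("10-bit ECC pack/unpack round trip OK\n");
        return 0;
    }
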
+
+/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
@@ -294,6 +484,23 @@ static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
/*----------------------------------------------------------------------*/
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small __initconst = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
+
+
static int __init nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
@@ -306,6 +513,10 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
/* which external chipselect will we be managing? */
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
@@ -351,7 +562,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.select_chip = nand_davinci_select_chip;
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
- info->chip.options = pdata ? pdata->options : 0;
+ info->chip.options = pdata->options;
info->ioaddr = (uint32_t __force) vaddr;
@@ -360,14 +571,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
- if (pdata && pdata->mask_ale)
- info->mask_ale = pdata->mask_cle;
- else
- info->mask_ale = MASK_ALE;
- if (pdata && pdata->mask_cle)
- info->mask_cle = pdata->mask_cle;
- else
- info->mask_cle = MASK_CLE;
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
info->chip.cmd_ctrl = nand_davinci_hwcontrol;
@@ -377,30 +582,44 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.read_buf = nand_davinci_read_buf;
info->chip.write_buf = nand_davinci_write_buf;
- /* use board-specific ECC config; else, the best available */
- if (pdata)
- ecc_mode = pdata->ecc_mode;
- else
- ecc_mode = NAND_ECC_HW;
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+ ret = -EINVAL;
switch (ecc_mode) {
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
break;
case NAND_ECC_HW:
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ goto err_ecc;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 3;
break;
- case NAND_ECC_HW_SYNDROME:
- /* FIXME implement */
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 10;
-
- dev_warn(&pdev->dev, "4-bit ECC nyet supported\n");
- /* FALL THROUGH */
default:
ret = -EINVAL;
goto err_ecc;
@@ -441,12 +660,56 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
}
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err_scan;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+
+ /* For large page chips we'll be wanting to use a
+ * not-yet-implemented mode that reads OOB data
+ * before reading the body of the page, to avoid
+ * the "infix OOB" model of NAND_ECC_HW_SYNDROME
+ * (and preserve manufacturer badblock markings).
+ */
+ dev_warn(&pdev->dev, "no 4-bit ECC support yet "
+ "for large page NAND\n");
+ ret = -EIO;
+ goto err_scan;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err_scan;
+
if (mtd_has_partitions()) {
struct mtd_partition *mtd_parts = NULL;
int mtd_parts_nb = 0;
@@ -455,22 +718,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
static const char *probes[] __initconst =
{ "cmdlinepart", NULL };
- const char *master_name;
-
- /* Set info->mtd.name = 0 temporarily */
- master_name = info->mtd.name;
- info->mtd.name = (char *)0;
-
- /* info->mtd.name == 0, means: don't bother checking
- <mtd-id> */
mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
&mtd_parts, 0);
-
- /* Restore info->mtd.name */
- info->mtd.name = master_name;
}
- if (mtd_parts_nb <= 0 && pdata) {
+ if (mtd_parts_nb <= 0) {
mtd_parts = pdata->parts;
mtd_parts_nb = pdata->nr_parts;
}
@@ -483,7 +735,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->partitioned = true;
}
- } else if (pdata && pdata->nr_parts) {
+ } else if (pdata->nr_parts) {
dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, info->mtd.name);
}
@@ -509,6 +761,11 @@ err_scan:
err_clk_enable:
clk_put(info->clk);
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW && pdata->ecc_bits == 4)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
err_ecc:
err_clk:
err_ioremap:
@@ -532,6 +789,11 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
else
status = del_mtd_device(&info->mtd);
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW && info->chip.ecc.bytes == 10)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
iounmap(info->base);
iounmap(info->vaddr);
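
The probe rework above is built around a two-phase scan: nand_scan_ident() discovers the chip geometry, the driver then installs an ECC layout that fits what was found, and nand_scan_tail() completes the setup. A minimal sketch of that pattern, with hypothetical example_* names and the same two-argument nand_scan_ident() call used in this patch:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>

    /* Hypothetical driver fragment: identify the chip, choose a layout
     * that matches the discovered geometry, then finish the scan. */
    static int example_scan_with_layout(struct mtd_info *mtd,
                                        struct nand_chip *chip,
                                        struct nand_ecclayout *small_layout)
    {
        int ret;

        /* phase 1: read the ID and fill in writesize/oobsize/erasesize */
        ret = nand_scan_ident(mtd, 1);
        if (ret < 0)
            return ret;

        /* the geometry is now known, so the OOB layout can be chosen */
        if (mtd->writesize == 512)
            chip->ecc.layout = small_layout;

        /* phase 2: set up ECC/OOB handling and build the bad block table */
        return nand_scan_tail(mtd);
    }

This split is what lets the small-page 4-bit layout above be patched in only after mtd->writesize and mtd->oobsize are known.
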
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 40c26080ecda..76beea40d2cf 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -138,7 +138,14 @@ static struct nand_ecclayout nand_hw_eccoob_8 = {
static struct nand_ecclayout nand_hw_eccoob_16 = {
.eccbytes = 5,
.eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 6}, {12, 4}, }
+ .oobfree = {{0, 5}, {11, 5}, }
+};
+
+static struct nand_ecclayout nand_hw_eccoob_64 = {
+ .eccbytes = 20,
+ .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
+ .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -192,7 +199,7 @@ static void wait_op_done(struct mxc_nand_host *host, int max_retries,
}
udelay(1);
}
- if (max_retries <= 0)
+ if (max_retries < 0)
DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
__func__, param);
}
@@ -795,9 +802,13 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
send_addr(host, (page_addr & 0xff), false);
if (host->pagesize_2k) {
- send_addr(host, (page_addr >> 8) & 0xFF, false);
- if (mtd->size >= 0x40000000)
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, false);
send_addr(host, (page_addr >> 16) & 0xff, true);
+ } else
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
@@ -923,7 +934,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 512;
this->ecc.bytes = 3;
- this->ecc.layout = &nand_hw_eccoob_8;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
@@ -957,12 +967,44 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.layout = &nand_hw_eccoob_16;
}
- host->pagesize_2k = 0;
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1)) {
+ err = -ENXIO;
+ goto escan;
+ }
- /* Scan to find existence of the device */
- if (nand_scan(mtd, 1)) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "MXC_ND: Unable to find any NAND device.\n");
+ host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0;
+
+ if (this->ecc.mode == NAND_ECC_HW) {
+ switch (mtd->oobsize) {
+ case 8:
+ this->ecc.layout = &nand_hw_eccoob_8;
+ break;
+ case 16:
+ this->ecc.layout = &nand_hw_eccoob_16;
+ break;
+ case 64:
+ this->ecc.layout = &nand_hw_eccoob_64;
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ this->ecc.size = 512;
+ this->ecc.bytes = 3;
+ this->ecc.layout = &nand_hw_eccoob_8;
+ this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.calculate = NULL;
+ this->ecc.correct = NULL;
+ this->ecc.hwctl = NULL;
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_ECC_EN;
+ writew(tmp, host->regs + NFC_CONFIG1);
+ break;
+ }
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
goto escan;
}
@@ -985,7 +1027,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
return 0;
escan:
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
eirq:
iounmap(host->regs);
eres:
@@ -1005,7 +1047,7 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
nand_release(&host->mtd);
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
iounmap(host->regs);
kfree(host);
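
The 0x10000000 threshold in the command routine above corresponds to the point where a 2 KiB-page device needs a third row-address cycle: 256 MiB / 2 KiB is 2^17 pages, which no longer fits in two address bytes. A rough host-side illustration of that arithmetic (assumed sizes, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* one address cycle carries 8 row-address bits */
    static unsigned int row_cycles(uint64_t chip_size, unsigned int page_size)
    {
        uint64_t pages = chip_size / page_size;
        unsigned int bits = 0;

        while ((1ULL << bits) < pages)
            bits++;
        return (bits + 7) / 8;
    }

    int main(void)
    {
        printf("128 MiB, 2 KiB pages: %u row cycles\n",
               row_cycles(128ULL << 20, 2048));    /* 2 */
        printf("256 MiB, 2 KiB pages: %u row cycles\n",
               row_cycles(256ULL << 20, 2048));    /* 3 */
        return 0;
    }
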
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3d7ed432fa41..8c21b89d2d0c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2756,7 +2756,8 @@ int nand_scan_tail(struct mtd_info *mtd)
* the out of band area
*/
chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
+ for (i = 0; i < ARRAY_SIZE(chip->ecc.layout->oobfree)
+ && chip->ecc.layout->oobfree[i].length; i++)
chip->ecc.layout->oobavail +=
chip->ecc.layout->oobfree[i].length;
mtd->oobavail = chip->ecc.layout->oobavail;
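
With the bounded loop above, oobavail is simply the sum of the oobfree lengths that are actually populated. A host-side check (illustrative only) using the numbers from the hwecc4_small layout added earlier in this patch, on a 16-byte-OOB small-page device:

    #include <stdio.h>

    struct free_region { int offset; int length; };

    int main(void)
    {
        /* hwecc4_small on a 512-byte page part: oobsize = 16, so the
         * second free region ends up with length 16 - 16 = 0 */
        struct free_region oobfree[] = { { 8, 5 }, { 16, 0 }, { 0, 0 } };
        int n = sizeof(oobfree) / sizeof(oobfree[0]);
        int i, oobavail = 0;

        for (i = 0; i < n && oobfree[i].length; i++)
            oobavail += oobfree[i].length;

        printf("oobavail = %d\n", oobavail);    /* prints 5 */
        return 0;
    }
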
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 868147acce2c..c0cb87d6d16e 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -428,8 +428,8 @@ EXPORT_SYMBOL(nand_calculate_ecc);
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- unsigned char b0, b1, b2;
- unsigned char byte_addr, bit_addr;
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
/* 256 or 512 bytes/ecc */
const uint32_t eccsize_mult =
(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
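
The widening of byte_addr matters because with 512-byte ECC blocks the decoded byte offset ranges over 0..511, which no longer fits in an unsigned char. A trivial host-side illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int byte_addr = 300;        /* legal offset in a 512-byte block */
        unsigned char truncated = byte_addr; /* what an 8-bit variable keeps */

        printf("wanted byte %u, an unsigned char would give %u\n",
               byte_addr, truncated);        /* 300 vs 44 */
        return 0;
    }
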
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 000000000000..ebd07e95b814
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,779 @@
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+
+#include <asm/dma.h>
+
+#include <mach/gpmc.h>
+#include <mach/nand.h>
+
+#define GPMC_IRQ_STATUS 0x18
+#define GPMC_ECC_CONFIG 0x1F4
+#define GPMC_ECC_CONTROL 0x1F8
+#define GPMC_ECC_SIZE_CONFIG 0x1FC
+#define GPMC_ECC1_RESULT 0x200
+
+#define DRIVER_NAME "omap2-nand"
+
+/* size (4 KiB) for IO mapping */
+#define NAND_IO_SIZE SZ_4K
+
+#define NAND_WP_OFF 0
+#define NAND_WP_BIT 0x00000010
+#define WR_RD_PIN_MONITORING 0x00600000
+
+#define GPMC_BUF_FULL 0x00000001
+#define GPMC_BUF_EMPTY 0x00000000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+struct omap_nand_info {
+ struct nand_hw_control controller;
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info mtd;
+ struct mtd_partition *parts;
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ unsigned long phys_base;
+ void __iomem *gpmc_cs_baseaddr;
+ void __iomem *gpmc_baseaddr;
+};
+
+/**
+ * omap_nand_wp - This function enables or disables the Write Protect feature
+ * @mtd: MTD device structure
+ * @mode: WP ON/OFF
+ */
+static void omap_nand_wp(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
+
+ if (mode)
+ config &= ~(NAND_WP_BIT); /* WP is ON */
+ else
+ config |= (NAND_WP_BIT); /* WP is OFF */
+
+ __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ switch (ctrl) {
+ case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_ADDRESS;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_NCE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, info->nand.IO_ADDR_W);
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ __raw_readsw(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u16 *p = (u16 *) buf;
+
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ writew(*p++, info->nand.IO_ADDR_W);
+
+ while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
+ GPMC_STATUS) & GPMC_BUF_FULL))
+ ;
+ }
+}
+/**
+ * omap_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ */
+static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ u16 *p = (u16 *) buf;
+
+ len >>= 1;
+ while (len--) {
+ if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+/**
+ * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
+ * @mtd: MTD device structure
+ */
+static void omap_hwecc_init(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned long val = 0x0;
+
+ /* Read from ECC Control Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* Clear all ECC | Enable Reg1 */
+ val = ((0x00000001<<8) | 0x00000001);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+
+ /* Read from ECC Size Config Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+ /* ECCSIZE1=512 | Select eccResultsize[0-3] */
+ val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+}
+
+/**
+ * gen_true_ecc - This function will generate true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECCs and indicates whether there is an error.
+ * If the error can be corrected, it is corrected in place in the page buffer.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+
+ case 11:
+ /* UN-Correctable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+ return -1;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
+ "offset: %d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 0;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+ return -1;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC register values
+ * and, if they mismatch, calls 'omap_compare_ecc' for error detection
+ * and correction.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+ (info->nand.ecc.size == 2048))
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using noninverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as long
+ * as nobody is trying to write data on the seemingly unused page. Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long val = 0x0;
+ unsigned long reg;
+
+ /* Start Reading from HW ECC1_Result = 0x200 */
+ reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
+ val = __raw_readl(reg);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+ reg += 4;
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_READSYN:
+ __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_WRITE:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ default:
+ DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
+ mode);
+ break;
+ }
+
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+}
+#endif
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * Wait function is called during Program and erase operations and
+ * the way it is called from MTD layer, we should wait till the NAND
+ * chip is ready after the programming/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long timeo = jiffies;
+ int status = NAND_STATUS_FAIL, state = this->state;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
+
+ __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
+
+ while (time_before(jiffies, timeo)) {
+ status = __raw_readb(this->IO_ADDR_R);
+ if (status & NAND_STATUS_READY)
+ break;
+ cond_resched();
+ }
+ return status;
+}
+
+/**
+ * omap_dev_ready - calls the platform specific dev_ready function
+ * @mtd: MTD device structure
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+
+ if ((val & 0x100) == 0x100) {
+ /* Clear IRQ Interrupt */
+ val |= 0x100;
+ val &= ~(0x0);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+ } else {
+ unsigned int cnt = 0;
+ while (cnt++ < 0x1FF) {
+ if ((val & 0x100) == 0x100)
+ return 0;
+ val = __raw_readl(info->gpmc_baseaddr +
+ GPMC_IRQ_STATUS);
+ }
+ }
+
+ return 1;
+}
+
+static int __devinit omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct omap_nand_platform_data *pdata;
+ int err;
+ unsigned long val;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->pdev = pdev;
+
+ info->gpmc_cs = pdata->cs;
+ info->gpmc_baseaddr = pdata->gpmc_baseaddr;
+ info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
+
+ info->mtd.priv = &info->nand;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ goto out_free_info;
+ }
+
+ /* Enable RD PIN Monitoring Reg */
+ if (pdata->dev_ready) {
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
+ val |= WR_RD_PIN_MONITORING;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
+ }
+
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
+ val &= ~(0xf << 8);
+ val |= (0xc & 0xf) << 8;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
+
+ /* NAND write protect off */
+ omap_nand_wp(&info->mtd, NAND_WP_OFF);
+
+ if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ pdev->dev.driver->name)) {
+ err = -EBUSY;
+ goto out_free_cs;
+ }
+
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ if (!info->nand.IO_ADDR_R) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+ info->nand.controller = &info->controller;
+
+ info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
+ info->nand.cmd_ctrl = omap_hwcontrol;
+
+ /* REVISIT: only supports 16-bit NAND flash */
+
+ info->nand.read_buf = omap_read_buf16;
+ info->nand.write_buf = omap_write_buf16;
+ info->nand.verify_buf = omap_verify_buf;
+
+ /*
+ * If RDY/BSY line is connected to OMAP then use the omap ready
+ * function and the generic nand_wait function which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
+ * chip delay which is slightly more than tR (AC Timing) of the NAND
+ * device and read status register until you get a failure or success
+ */
+ if (pdata->dev_ready) {
+ info->nand.dev_ready = omap_dev_ready;
+ info->nand.chip_delay = 0;
+ } else {
+ info->nand.waitfunc = omap_wait;
+ info->nand.chip_delay = 50;
+ }
+
+ info->nand.options |= NAND_SKIP_BBTSCAN;
+ if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
+ == 0x1000)
+ info->nand.options |= NAND_BUSWIDTH_16;
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+
+ /* init HW ECC */
+ omap_hwecc_init(&info->mtd);
+#else
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ /* DIP switches on some boards change between 8 and 16 bit
+ * bus widths for flash. Try the other width if the first try fails.
+ */
+ if (nand_scan(&info->mtd, 1)) {
+ info->nand.options ^= NAND_BUSWIDTH_16;
+ if (nand_scan(&info->mtd, 1)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(&info->mtd, info->parts, err);
+ else if (pdata->parts)
+ add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ add_mtd_device(&info->mtd);
+
+ platform_set_drvdata(pdev, &info->mtd);
+
+ return 0;
+
+out_release_mem_region:
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+out_free_cs:
+ gpmc_cs_free(info->gpmc_cs);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd);
+
+ platform_set_drvdata(pdev, NULL);
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+ kfree(info);
+ return 0;
+}
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_nand_init(void)
+{
+ printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ return platform_driver_register(&omap_nand_driver);
+}
+
+static void __exit omap_nand_exit(void)
+{
+ platform_driver_unregister(&omap_nand_driver);
+}
+
+module_init(omap_nand_init);
+module_exit(omap_nand_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index c2dfd3ea353d..7ad972229db4 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -47,6 +47,28 @@ static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
writeb(cmd, nc->IO_ADDR_W + offs);
}
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *io_base = chip->IO_ADDR_R;
+ uint64_t *buf64;
+ int i = 0;
+
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ uint64_t x;
+ asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct mtd_info *mtd;
@@ -83,6 +105,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc->priv = board;
nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->read_buf = orion_nand_read_buf;
nc->ecc.mode = NAND_ECC_SOFT;
if (board->chip_delay)
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 86e1d08eee00..4e16c6f5bdd5 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -61,6 +61,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.dev_ready = pdata->ctrl.dev_ready;
data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.write_buf = pdata->ctrl.write_buf;
+ data->chip.read_buf = pdata->ctrl.read_buf;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
@@ -70,6 +72,13 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ res = pdata->ctrl.probe(pdev);
+ if (res)
+ goto out;
+ }
+
/* Scan to find existance of the device */
if (nand_scan(&data->mtd, 1)) {
res = -ENXIO;
@@ -86,6 +95,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
return 0;
}
}
+ if (pdata->chip.set_parts)
+ pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
res = add_mtd_partitions(&data->mtd, data->parts,
@@ -99,6 +110,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
nand_release(&data->mtd);
out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
kfree(data);
@@ -111,15 +124,15 @@ out:
static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
-#ifdef CONFIG_MTD_PARTITIONS
struct platform_nand_data *pdata = pdev->dev.platform_data;
-#endif
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
#endif
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
iounmap(data->io_base);
kfree(data);
@@ -128,7 +141,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = plat_nand_remove,
+ .remove = __devexit_p(plat_nand_remove),
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 8e375d5fe231..11dc7e69c4fb 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -74,6 +74,14 @@ static struct nand_ecclayout nand_hw_eccoob = {
struct s3c2410_nand_info;
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @mtd: The MTD instance to pass to the MTD layer.
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
struct s3c2410_nand_mtd {
struct mtd_info mtd;
struct nand_chip chip;
@@ -90,6 +98,21 @@ enum s3c_cpu_type {
/* overview of the s3c2410 nand state */
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @area: The IO area resource that came from request_mem_region().
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers described by @area.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @cpu_type: The exact type of this controller.
+ */
struct s3c2410_nand_info {
/* mtd info */
struct nand_hw_control controller;
@@ -145,12 +168,19 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info)
#define NS_IN_KHZ 1000000
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
- result = (wanted * clk) / NS_IN_KHZ;
- result++;
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
@@ -169,13 +199,21 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
/* controller setup */
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * necessary timing cycles to the hardware.
+ */
static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
{
struct s3c2410_platform_nand *plat = info->platform;
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long set, cfg, mask;
+ unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
unsigned long flags;
/* calculate the timing information for the controller */
@@ -215,9 +253,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
case TYPE_S3C2440:
case TYPE_S3C2412:
- mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) |
- S3C2410_NFCONF_TWRPH0(7) |
- S3C2410_NFCONF_TWRPH1(7));
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
set = S3C2440_NFCONF_TACLS(tacls - 1);
set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
@@ -225,14 +263,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
break;
default:
- /* keep compiler happy */
- mask = 0;
- set = 0;
BUG();
}
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
local_irq_save(flags);
cfg = readl(info->regs + S3C2410_NFCONF);
@@ -242,9 +275,18 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
local_irq_restore(flags);
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
return 0;
}
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to setup the hardware access speeds and set the controller to be enabled.
+*/
static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
{
int ret;
@@ -268,8 +310,19 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
return 0;
}
-/* select chip */
-
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly setup, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
{
struct s3c2410_nand_info *info;
@@ -530,7 +583,16 @@ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- readsl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
}
static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
@@ -542,7 +604,16 @@ static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int
static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
}
/* cpufreq driver support */
@@ -593,7 +664,7 @@ static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *inf
/* device management functions */
-static int s3c2410_nand_remove(struct platform_device *pdev)
+static int s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
@@ -645,17 +716,31 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
+ struct mtd_partition *part_info;
+ int nr_part = 0;
+
if (set == NULL)
return add_mtd_device(&mtd->mtd);
- if (set->nr_partitions > 0 && set->partitions != NULL) {
- return add_mtd_partitions(&mtd->mtd, set->partitions, set->nr_partitions);
+ if (set->nr_partitions == 0) {
+ mtd->mtd.name = set->name;
+ nr_part = parse_mtd_partitions(&mtd->mtd, part_probes,
+ &part_info, 0);
+ } else {
+ if (set->nr_partitions > 0 && set->partitions != NULL) {
+ nr_part = set->nr_partitions;
+ part_info = set->partitions;
+ }
}
+ if (nr_part > 0 && part_info)
+ return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
+
return add_mtd_device(&mtd->mtd);
}
#else
@@ -667,11 +752,16 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
}
#endif
-/* s3c2410_nand_init_chip
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
*
- * init a single instance of an chip
-*/
-
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are set up and the necessary control routines selected.
+ */
static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
@@ -757,14 +847,40 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
if (set->disable_ecc)
chip->ecc.mode = NAND_ECC_NONE;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ dev_info(info->device, "NAND ECC disabled\n");
+ break;
+ case NAND_ECC_SOFT:
+ dev_info(info->device, "NAND soft ECC\n");
+ break;
+ case NAND_ECC_HW:
+ dev_info(info->device, "NAND hardware ECC\n");
+ break;
+ default:
+ dev_info(info->device, "NAND ECC UNKNOWN\n");
+ break;
+ }
+
+ /* If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND, and also skip the
+ * full NAND scan that can take 1/2s or so. Little things... */
+ if (set->flash_bbt)
+ chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
}
-/* s3c2410_nand_update_chip
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
*
- * post-probe chip update, to change any items, such as the
- * layout for large page nand
- */
-
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd)
{
@@ -773,33 +889,33 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
dev_dbg(info->device, "chip %p => page shift %d\n",
chip, chip->page_shift);
- if (hardware_ecc) {
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return;
+
/* change the behaviour depending on wether we are using
* the large or small page nand device */
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.layout = &nand_hw_eccoob;
- }
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.layout = &nand_hw_eccoob;
}
}
-/* s3c2410_nand_probe
+/* s3c24xx_nand_probe
*
* called by device layer when it finds a device matching
* one our driver can handled. This code checks to see if
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-
-static int s3c24xx_nand_probe(struct platform_device *pdev,
- enum s3c_cpu_type cpu_type)
+static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -809,6 +925,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
int nr_sets;
int setno;
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -922,7 +1040,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
return 0;
exit_error:
- s3c2410_nand_remove(pdev);
+ s3c24xx_nand_remove(pdev);
if (err == 0)
err = -EINVAL;
@@ -983,50 +1101,33 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
/* driver device registration */
-static int s3c2410_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2410);
-}
-
-static int s3c2440_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2440);
-}
-
-static int s3c2412_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2412);
-}
-
-static struct platform_driver s3c2410_nand_driver = {
- .probe = s3c2410_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2410-nand",
- .owner = THIS_MODULE,
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
},
+ { }
};
-static struct platform_driver s3c2440_nand_driver = {
- .probe = s3c2440_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2440-nand",
- .owner = THIS_MODULE,
- },
-};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-static struct platform_driver s3c2412_nand_driver = {
- .probe = s3c2412_nand_probe,
- .remove = s3c2410_nand_remove,
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
.suspend = s3c24xx_nand_suspend,
.resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
.driver = {
- .name = "s3c2412-nand",
+ .name = "s3c24xx-nand",
.owner = THIS_MODULE,
},
};
@@ -1035,16 +1136,12 @@ static int __init s3c2410_nand_init(void)
{
printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
- platform_driver_register(&s3c2412_nand_driver);
- platform_driver_register(&s3c2440_nand_driver);
- return platform_driver_register(&s3c2410_nand_driver);
+ return platform_driver_register(&s3c24xx_nand_driver);
}
static void __exit s3c2410_nand_exit(void)
{
- platform_driver_unregister(&s3c2412_nand_driver);
- platform_driver_unregister(&s3c2440_nand_driver);
- platform_driver_unregister(&s3c2410_nand_driver);
+ platform_driver_unregister(&s3c24xx_nand_driver);
}
module_init(s3c2410_nand_init);
@@ -1053,6 +1150,3 @@ module_exit(s3c2410_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
-MODULE_ALIAS("platform:s3c2410-nand");
-MODULE_ALIAS("platform:s3c2412-nand");
-MODULE_ALIAS("platform:s3c2440-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 812479264896..488088eff2ca 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -64,7 +64,7 @@ struct txx9ndfmc_priv {
struct nand_chip chip;
struct mtd_info mtd;
int cs;
- char mtdname[BUS_ID_SIZE + 2];
+ const char *mtdname;
};
#define MAX_TXX9NDFMC_DEV 4
@@ -334,16 +334,23 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->ch_mask != 1) {
txx9_priv->cs = i;
- sprintf(txx9_priv->mtdname, "%s.%u",
- dev_name(&dev->dev), i);
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
} else {
txx9_priv->cs = -1;
- strcpy(txx9_priv->mtdname, dev_name(&dev->dev));
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
}
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
if (nand_scan(mtd, 1)) {
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
@@ -385,6 +392,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
kfree(drvdata->parts[i]);
#endif
del_mtd_device(mtd);
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
return 0;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index e3f8495a94c2..fb86cacd5bdb 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -208,7 +208,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -230,11 +230,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
printk("Argh! No free blocks found! LastFreeEUN = %d, "
"FirstEUN = %d\n", nftl->LastFreeEUN,
le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
- return 0xffff;
+ return BLOCK_NIL;
}
} while (pot != nftl->LastFreeEUN);
- return 0xffff;
+ return BLOCK_NIL;
}
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
@@ -431,7 +431,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
/* add the header so that it is now a valid chain */
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
- oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
8, &retlen, (char *)&oob.u);
@@ -515,7 +515,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
if (ChainLength < 2) {
printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
"Failing request\n");
- return 0xffff;
+ return BLOCK_NIL;
}
return NFTL_foldchain (nftl, LongestChain, pendingblock);
@@ -578,7 +578,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING
"Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -601,7 +601,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
//u16 startEUN = nftl->EUNtable[thisVUC];
//printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
- writeEUN = NFTL_makefreeblock(nftl, 0xffff);
+ writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
if (writeEUN == BLOCK_NIL) {
/* OK, we accept that the above comment is
@@ -673,7 +673,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6391e3dc8002..38d656b9b2ee 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -565,7 +565,7 @@ int omap2_onenand_rephase(void)
NULL, __adjust_timing);
}
-static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
+static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -777,7 +777,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = omap2_onenand_remove,
+ .remove = __devexit_p(omap2_onenand_remove),
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 30d6999e5f9f..6e829095ea9d 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -9,6 +9,10 @@
* auto-placement support, read-while load support, various fixes
* Copyright (C) Nokia Corporation, 2007
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -16,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -27,6 +32,38 @@
#include <asm/io.h>
+/* Default Flex-OneNAND boundary and lock respectively */
+static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
+
+module_param_array(flex_bdry, int, NULL, 0400);
+MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
+ "Syntax:flex_bdry=DIE_BDRY,LOCK,..."
+ "DIE_BDRY: SLC boundary of the die"
+ "LOCK: Locking information for SLC boundary"
+ " : 0->Set boundary in unlocked status"
+ " : 1->Set boundary in locked status");
+
+/**
+ * onenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -65,6 +102,14 @@ static const unsigned char ffchars[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
};
/**
@@ -171,6 +216,70 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
}
/**
+ * flexonenand_block - For a given address, return the block number
+ * @param this - OneNAND device structure
+ * @param addr - Address for which block number is needed
+ */
+static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
+
+inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t)block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t)block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+EXPORT_SYMBOL(onenand_addr);
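A standalone illustration (editor's sketch, not part of the patch; the erase_shift and boundary values are invented) of the arithmetic in flexonenand_block()/onenand_block() above: addresses inside the SLC area are counted in half-size blocks, and anything beyond the boundary folds pairs of those units into full-size MLC blocks.

#include <stdio.h>

/* Mirrors flexonenand_block() for a single die.
 * erase_shift = 19 -> 512 KiB MLC blocks, 256 KiB SLC blocks. */
static unsigned example_block(long long addr, int erase_shift, int boundary)
{
	unsigned blk = addr >> (erase_shift - 1);	/* count in SLC-sized units */

	if (blk > boundary)				/* past the SLC area: two units */
		blk = (blk + boundary + 1) >> 1;	/* make up one MLC block */
	return blk;
}

int main(void)
{
	/* Boundary 3: blocks 0-3 are 256 KiB SLC blocks, the rest 512 KiB MLC */
	printf("%u\n", example_block(3LL * (256 << 10), 19, 3));	/* 3: last SLC block */
	printf("%u\n", example_block(5LL * (256 << 10), 19, 3));	/* 4: first MLC block */
	return 0;
}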
+
+/**
* onenand_get_density - [DEFAULT] Get OneNAND density
* @param dev_id OneNAND device ID
*
@@ -183,6 +292,22 @@ static inline int onenand_get_density(int dev_id)
}
/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+EXPORT_SYMBOL(flexonenand_region);
+
+/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @param mtd MTD device structure
* @param cmd the command to be sent
@@ -207,16 +332,28 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
page = -1;
break;
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
+ page = -1;
+ break;
+
case ONENAND_CMD_ERASE:
case ONENAND_CMD_BUFFERRAM:
case ONENAND_CMD_OTP_ACCESS:
- block = (int) (addr >> this->erase_shift);
+ block = onenand_block(this, addr);
page = -1;
break;
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
default:
- block = (int) (addr >> this->erase_shift);
- page = (int) (addr >> this->page_shift);
+ block = onenand_block(this, addr);
+ page = (int) (addr - onenand_addr(this, block)) >> this->page_shift;
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
@@ -236,7 +373,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -258,13 +395,18 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
if (page != -1) {
/* Now we use page size operation */
- int sectors = 4, count = 4;
+ int sectors = 0, count = 0;
int dataram;
switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+ if (ONENAND_IS_MLC(this))
+ /* It is always BufferRAM0 */
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
break;
default:
@@ -293,6 +435,30 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
}
/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
+ */
+static inline int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i, result = 0;
+
+ if (!FLEXONENAND(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ else
+ result = ONENAND_ECC_1BIT_ALL;
+ }
+
+ return result;
+}
+
+/**
* onenand_wait - [DEFAULT] wait until the command is done
* @param mtd MTD device structure
* @param state state to select the max. timeout value
@@ -331,14 +497,14 @@ static int onenand_wait(struct mtd_info *mtd, int state)
* power off recovery (POR) test, it should read ECC status first
*/
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc) {
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
- printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+ printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.corrected++;
}
}
@@ -656,7 +822,7 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
if (found && ONENAND_IS_DDP(this)) {
/* Select DataRAM for DDP */
- int block = (int) (addr >> this->erase_shift);
+ int block = onenand_block(this, addr);
int value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
@@ -816,6 +982,149 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
}
/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait / onenand_bbt_wait
+ *
+ * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
+ * lower page address and MSB page has higher page address in paired pages.
+ * If power off occurs during MSB page program, the paired LSB page data can
+ * become corrupt. LSB page recovery read is a way to read LSB page though page
+ * data are corrupted. When uncorrectable error occurs as a result of LSB page
+ * read after power up, issue LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ /* We are attempting to reread, so decrement stats.failed
+ * which was incremented by onenand_wait due to read failure
+ */
+ printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+ mtd->ecc_stats.failed--;
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
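The intended calling pattern (mirroring the read paths added later in this patch) is, as a sketch:

	ret = this->wait(mtd, FL_READING);
	if (unlikely(ret))
		ret = onenand_recover_lsb(mtd, from, ret);	/* retry a corrupted LSB page */
	onenand_update_bufferram(mtd, from, !ret);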
+
+/**
+ * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops: oob operation description structure
+ *
+ * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram.
+ * So, read-while-load is not present.
+ */
+static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0;
+ int writesize = this->writesize;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = min_t(int, writesize, len - read);
+
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ read += thislen;
+ if (read == len)
+ break;
+
+ from += thislen;
+ buf += thislen;
+ }
+
+ /*
+ * Return success, if no ECC failures, else -EBADMSG
+ * fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
* onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
* @param mtd MTD device structure
* @param from offset to read from
@@ -962,7 +1271,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
u_char *buf = ops->oobbuf;
- int ret = 0;
+ int ret = 0, readcmd;
from += ops->ooboffs;
@@ -993,17 +1302,22 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret && ret != -EBADMSG) {
printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
break;
@@ -1053,6 +1367,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct onenand_chip *this = mtd->priv;
struct mtd_oob_ops ops = {
.len = len,
.ooblen = 0,
@@ -1062,7 +1377,9 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
*retlen = ops.retlen;
@@ -1080,6 +1397,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
switch (ops->mode) {
@@ -1094,7 +1412,9 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = onenand_read_ops_nolock(mtd, from, ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, ops) :
+ onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
onenand_release_device(mtd);
@@ -1128,11 +1448,11 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
", controller error 0x%04x\n", ecc, ctrl);
- return ONENAND_BBT_READ_ERROR;
+ return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
printk(KERN_ERR "onenand_bbt_wait: read timeout!"
@@ -1163,7 +1483,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
{
struct onenand_chip *this = mtd->priv;
int read = 0, thislen, column;
- int ret = 0;
+ int ret = 0, readcmd;
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
@@ -1183,17 +1503,22 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
column = from & (mtd->oobsize - 1);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = mtd->oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
- ret = onenand_bbt_wait(mtd, FL_READING);
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret)
break;
@@ -1230,9 +1555,11 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
{
struct onenand_chip *this = mtd->priv;
u_char *oob_buf = this->oob_buf;
- int status, i;
+ int status, i, readcmd;
- this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
status = this->wait(mtd, FL_READING);
if (status)
@@ -1633,7 +1960,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
{
struct onenand_chip *this = mtd->priv;
int column, ret = 0, oobsize;
- int written = 0;
+ int written = 0, oobcmd;
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
@@ -1675,6 +2002,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
oobbuf = this->oob_buf;
+ oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
/* Loop until all data write */
while (written < len) {
int thislen = min_t(int, oobsize, len - written);
@@ -1692,7 +2021,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ if (ONENAND_IS_MLC(this)) {
+ /* Set main area of DataRAM to 0xff*/
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
if (ONENAND_IS_2PLANE(this)) {
@@ -1815,29 +2151,48 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct onenand_chip *this = mtd->priv;
unsigned int block_size;
- loff_t addr;
- int len;
- int ret = 0;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0, i;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_end = 0;
DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
- block_size = (1 << this->erase_shift);
-
- /* Start address must align on block boundary */
- if (unlikely(instr->addr & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ /* Do not allow erase past end of device */
+ if (unlikely((len + addr) > mtd->size)) {
+ printk(KERN_ERR "onenand_erase: Erase past end of device\n");
return -EINVAL;
}
- /* Length must align on block boundary */
- if (unlikely(instr->len & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Length not block aligned\n");
- return -EINVAL;
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ i = flexonenand_region(mtd, addr);
+ region = &mtd->eraseregions[i];
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ if (unlikely((addr - region->offset) & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+ } else {
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely(addr & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
}
- /* Do not allow erase past end of device */
- if (unlikely((instr->len + instr->addr) > mtd->size)) {
- printk(KERN_ERR "onenand_erase: Erase past end of device\n");
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Length not block aligned\n");
return -EINVAL;
}
@@ -1847,9 +2202,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
onenand_get_device(mtd, FL_ERASING);
/* Loop throught the pages */
- len = instr->len;
- addr = instr->addr;
-
instr->state = MTD_ERASING;
while (len) {
@@ -1869,7 +2221,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = this->wait(mtd, FL_ERASING);
/* Check, if it is write protected */
if (ret) {
- printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift));
+ printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
+ onenand_block(this, addr));
instr->state = MTD_ERASE_FAILED;
instr->fail_addr = addr;
goto erase_exit;
@@ -1877,6 +2230,22 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
len -= block_size;
addr += block_size;
+
+ if (addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* FIXME: This should be handled at MTD partitioning level. */
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ goto erase_exit;
+ }
+ }
+
}
instr->state = MTD_ERASE_DONE;
@@ -1955,13 +2324,17 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
int block;
/* Get block number */
- block = ((int) ofs) >> bbm->bbt_erase_shift;
+ block = onenand_block(this, ofs);
if (bbm->bbt)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we dont have to mess with 16 bit access */
ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
- return onenand_write_oob_nolock(mtd, ofs, &ops);
+ /* FIXME : What to do when marking SLC block in partition
+ * with MLC erasesize? For now, it is not advisable to
+ * create partitions containing both SLC and MLC regions.
+ */
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
}
/**
@@ -2005,8 +2378,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
int start, end, block, value, status;
int wp_status_mask;
- start = ofs >> this->erase_shift;
- end = len >> this->erase_shift;
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len) - 1;
if (cmd == ONENAND_CMD_LOCK)
wp_status_mask = ONENAND_WP_LS;
@@ -2018,7 +2391,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
/* Set start block address */
this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Set end block address */
- this->write_word(start + end - 1, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
/* Write lock command */
this->command(mtd, cmd, 0, 0);
@@ -2039,7 +2412,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
}
/* Block lock scheme */
- for (block = start; block < start + end; block++) {
+ for (block = start; block < end + 1; block++) {
/* Set block address */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
@@ -2147,7 +2520,7 @@ static void onenand_unlock_all(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
loff_t ofs = 0;
- size_t len = this->chipsize;
+ loff_t len = mtd->size;
if (this->options & ONENAND_HAS_UNLOCK_ALL) {
/* Set start block address */
@@ -2163,12 +2536,16 @@ static void onenand_unlock_all(struct mtd_info *mtd)
& ONENAND_CTRL_ONGO)
continue;
+ /* Don't check lock status */
+ if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
+ return;
+
/* Check lock status */
if (onenand_check_lock_status(this))
return;
/* Workaround for all block unlock in DDP */
- if (ONENAND_IS_DDP(this)) {
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
/* All blocks on another chip */
ofs = this->chipsize >> 1;
len = this->chipsize >> 1;
@@ -2210,7 +2587,9 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2277,21 +2656,32 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
- .ooblen = len,
- .oobbuf = buf,
- .ooboffs = 0,
- };
+ struct mtd_oob_ops ops;
int ret;
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_write_oob_nolock(mtd, from, &ops);
-
- *retlen = ops.oobretlen;
+ if (FLEXONENAND(this)) {
+ /*
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+ ops.len = mtd->writesize;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
+ *retlen = ops.retlen;
+ } else {
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ret = onenand_write_oob_nolock(mtd, from, &ops);
+ *retlen = ops.oobretlen;
+ }
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2475,27 +2865,34 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct onenand_chip *this = mtd->priv;
- u_char *oob_buf = this->oob_buf;
+ u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
size_t retlen;
int ret;
- memset(oob_buf, 0xff, mtd->oobsize);
+ memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
+ : mtd->oobsize);
/*
* Note: OTP lock operation
* OTP block : 0xXXFC
* 1st block : 0xXXF3 (If chip support)
* Both : 0xXXF0 (If chip support)
*/
- oob_buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ if (FLEXONENAND(this))
+ buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ else
+ buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
/*
* Write lock mark to 8th word of sector0 of page0 of the spare0.
* We write 16 bytes spare area instead of 2 bytes.
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
*/
+
from = 0;
- len = 16;
+ len = FLEXONENAND(this) ? mtd->writesize : 16;
- ret = onenand_otp_walk(mtd, from, len, &retlen, oob_buf, do_otp_lock, MTD_OTP_USER);
+ ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
return ret ? : retlen;
}
@@ -2542,6 +2939,14 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
+ if (ONENAND_IS_MLC(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
if (this->options & ONENAND_HAS_CONT_LOCK)
printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
if (this->options & ONENAND_HAS_UNLOCK_ALL)
@@ -2559,14 +2964,16 @@ static void onenand_check_features(struct mtd_info *mtd)
*/
static void onenand_print_device_info(int device, int version)
{
- int vcc, demuxed, ddp, density;
+ int vcc, demuxed, ddp, density, flexonenand;
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
density = onenand_get_density(device);
- printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
- demuxed ? "" : "Muxed ",
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
ddp ? "(DDP)" : "",
(16 << density),
vcc ? "2.65/3.3" : "1.8",
@@ -2576,6 +2983,7 @@ static void onenand_print_device_info(int device, int version)
static const struct onenand_manufacturers onenand_manuf_ids[] = {
{ONENAND_MFR_SAMSUNG, "Samsung"},
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
};
/**
@@ -2605,6 +3013,261 @@ static int onenand_check_maf(int manuf)
}
/**
+* flexonenand_get_boundary - Reads the SLC boundary
+* @param mtd - MTD device structure
+**/
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned die, bdry;
+ int ret, syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ ret = this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize
+ * @param mtd - MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
+ " numblocks: %04u]\n",
+ (unsigned int) mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
+ this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
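A worked example with invented numbers (editor's note, not from the patch): for a single-die part with blksperdie = 1024, erase_shift = 19 (512 KiB MLC / 256 KiB SLC blocks) and boundary[0] = 255, the loop above builds two erase regions: region 0 at offset 0 with a 256 KiB erasesize and 256 blocks (64 MiB of SLC), and region 1 at offset 64 MiB with a 512 KiB erasesize and 1023 ^ 255 = 768 blocks (384 MiB of MLC). diesize[0], and hence mtd->size, works out to 1024 * 512 KiB - 256 * 256 KiB = 448 MiB, while this->chipsize keeps reporting the 512 MiB maximum, and mtd->erasesize stays at the 512 KiB MLC size because there is more than one region.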
+
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd - MTD device structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads on the block give uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_PLACE,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "Block %d not erased.\n", block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - mtd info structure
+ * @param die - die whose SLC boundary is to be changed
+ * @param boundary - new SLC boundary (number of the last SLC block); -1 means no change
+ * @param lock - non-zero writes the new boundary as locked
+ */
+int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ /* Change only once for SDP Flex-OneNAND */
+ if (die && (!ONENAND_IS_DDP(this)))
+ return 0;
+
+ /* boundary value of -1 indicates no required change */
+ if (boundary < 0 || boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
+ "Boundary not changed.\n");
+ return -EINVAL;
+ }
+
+ /* Check if converting blocks are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if the boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ ret = 1;
+ goto out;
+ }
+
+ printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ addr = die ? this->diesize[0] : 0;
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change*/
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
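A usage sketch (hypothetical values; mtd is an already-probed Flex-OneNAND device): converting blocks 0-511 of die 0 to SLC and locking the new boundary would look roughly like

	ret = flexonenand_set_boundary(mtd, 0, 511, 1);
	if (ret)
		printk(KERN_ERR "boundary change failed: %d\n", ret);

The in-tree caller added by this patch is onenand_scan(), which applies the flex_bdry module parameters after the bad-block scan.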
+
+/**
* onenand_probe - [OneNAND Interface] Probe the OneNAND device
* @param mtd MTD device structure
*
@@ -2621,7 +3284,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* Save system configuration 1 */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
/* Clear Sync. Burst Read mode to read BootRAM */
- this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
/* Send the command for reading device ID from BootRAM */
this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
@@ -2646,6 +3309,7 @@ static int onenand_probe(struct mtd_info *mtd)
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
@@ -2657,29 +3321,55 @@ static int onenand_probe(struct mtd_info *mtd)
this->version_id = ver_id;
density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1), GFP_KERNEL);
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
this->chipsize = (16 << density) << 20;
- /* Set density mask. it is used for DDP */
- if (ONENAND_IS_DDP(this))
- this->density_mask = (1 << (density + 6));
- else
- this->density_mask = 0;
/* OneNAND page size & block size */
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_MLC(this))
+ mtd->writesize <<= 1;
+
mtd->oobsize = mtd->writesize >> 5;
/* Pages per a block are always 64 in OneNAND */
mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
this->erase_shift = ffs(mtd->erasesize) - 1;
this->page_shift = ffs(mtd->writesize) - 1;
this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
+ /* Set density mask. it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
/* It's real page size */
this->writesize = mtd->writesize;
/* REVIST: Multichip handling */
- mtd->size = this->chipsize;
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
/* Check OneNAND features */
onenand_check_features(mtd);
@@ -2734,7 +3424,7 @@ static void onenand_resume(struct mtd_info *mtd)
*/
int onenand_scan(struct mtd_info *mtd, int maxchips)
{
- int i;
+ int i, ret;
struct onenand_chip *this = mtd->priv;
if (!this->read_word)
@@ -2746,6 +3436,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
this->command = onenand_command;
if (!this->wait)
onenand_setup_wait(mtd);
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+ if (!this->unlock_all)
+ this->unlock_all = onenand_unlock_all;
if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
@@ -2796,6 +3490,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
* Allow subpage writes up to oobsize.
*/
switch (mtd->oobsize) {
+ case 128:
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 0;
+ break;
case 64:
this->ecclayout = &onenand_oob_64;
mtd->subpage_sft = 2;
@@ -2859,9 +3557,18 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->owner = THIS_MODULE;
/* Unlock whole block */
- onenand_unlock_all(mtd);
+ this->unlock_all(mtd);
+
+ ret = this->scan_bbt(mtd);
+ if ((!FLEXONENAND(this)) || ret)
+ return ret;
- return this->scan_bbt(mtd);
+ /* Change Flex-OneNAND boundaries if required */
+ for (i = 0; i < MAX_DIES; i++)
+ flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
+ flex_bdry[(2 * i) + 1]);
+
+ return 0;
}
/**
@@ -2890,6 +3597,7 @@ void onenand_release(struct mtd_info *mtd)
kfree(this->page_buf);
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
+ kfree(mtd->eraseregions);
}
EXPORT_SYMBOL_GPL(onenand_scan);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 2f53b51c6805..a91fcac1af01 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -63,6 +63,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t from;
size_t readlen, ooblen;
struct mtd_oob_ops ops;
+ int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
@@ -76,7 +77,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/* Note that numblocks is 2 * (real numblocks) here;
* see i += 2 below as it makses shifting and masking less painful
*/
- numblocks = mtd->size >> (bbm->bbt_erase_shift - 1);
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
startblock = 0;
from = 0;
@@ -106,7 +107,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
}
}
i += 2;
- from += (1 << bbm->bbt_erase_shift);
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
}
return 0;
@@ -143,7 +149,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
uint8_t res;
/* Get block number * 2 */
- block = (int) (offs >> (bbm->bbt_erase_shift - 1));
+ block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
@@ -178,7 +184,7 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
struct bbm_info *bbm = this->bbm;
int len, ret = 0;
- len = mtd->size >> (this->erase_shift + 2);
+ len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index d64200b7c94b..f6e3c8aebd3a 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -6,6 +6,10 @@
* Copyright © 2005-2007 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND simulator support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -24,16 +28,38 @@
#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
#endif
+
#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
#endif
+
+#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
+
#ifndef CONFIG_ONENAND_SIM_VERSION_ID
#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
#endif
+#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
+#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
+#endif
+
+/* Initial boundary values for Flex-OneNAND Simulator */
+#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
+#endif
+
+#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
+#endif
+
static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
+static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
+static int boundary[] = {
+ CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
+ CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
+};
struct onenand_flash {
void __iomem *base;
@@ -57,12 +83,18 @@ struct onenand_flash {
(writew(v, this->base + ONENAND_REG_WP_STATUS))
/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (2048 + 64)
+#define MAX_ONENAND_PAGESIZE (4096 + 128)
static unsigned char *ffchars;
+#if CONFIG_FLEXONENAND
+#define PARTITION_NAME "Flex-OneNAND simulator partition"
+#else
+#define PARTITION_NAME "OneNAND simulator partition"
+#endif
+
static struct mtd_partition os_partitions[] = {
{
- .name = "OneNAND simulator partition",
+ .name = PARTITION_NAME,
.offset = 0,
.size = MTDPART_SIZ_FULL,
},
@@ -104,6 +136,7 @@ static void onenand_lock_handle(struct onenand_chip *this, int cmd)
switch (cmd) {
case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_UNLOCK_ALL:
if (block_lock_scheme)
ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
else
@@ -228,10 +261,12 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
{
struct mtd_info *mtd = &info->mtd;
struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset;
+ int main_offset, spare_offset, die = 0;
void __iomem *src;
void __iomem *dest;
unsigned int i;
+ static int pi_operation;
+ int erasesize, rgn;
if (dataram) {
main_offset = mtd->writesize;
@@ -241,10 +276,27 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
spare_offset = 0;
}
+ if (pi_operation) {
+ die = readw(this->base + ONENAND_REG_START_ADDRESS2);
+ die >>= ONENAND_DDP_SHIFT;
+ }
+
switch (cmd) {
+ case FLEXONENAND_CMD_PI_ACCESS:
+ pi_operation = 1;
+ break;
+
+ case ONENAND_CMD_RESET:
+ pi_operation = 0;
+ break;
+
case ONENAND_CMD_READ:
src = ONENAND_CORE(flash) + offset;
dest = ONENAND_MAIN_AREA(this, main_offset);
+ if (pi_operation) {
+ writew(boundary[die], this->base + ONENAND_DATARAM);
+ break;
+ }
memcpy(dest, src, mtd->writesize);
/* Fall through */
@@ -257,6 +309,10 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
case ONENAND_CMD_PROG:
src = ONENAND_MAIN_AREA(this, main_offset);
dest = ONENAND_CORE(flash) + offset;
+ if (pi_operation) {
+ boundary[die] = readw(this->base + ONENAND_DATARAM);
+ break;
+ }
/* To handle partial write */
for (i = 0; i < (1 << mtd->subpage_sft); i++) {
int off = i * this->subpagesize;
@@ -284,9 +340,18 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
break;
case ONENAND_CMD_ERASE:
- memset(ONENAND_CORE(flash) + offset, 0xff, mtd->erasesize);
+ if (pi_operation)
+ break;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, offset);
+ erasesize = mtd->eraseregions[rgn].erasesize;
+ } else
+ erasesize = mtd->erasesize;
+
+ memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (mtd->erasesize >> 5));
+ (erasesize >> 5));
break;
default:
@@ -339,7 +404,7 @@ static void onenand_command_handle(struct onenand_chip *this, int cmd)
}
if (block != -1)
- offset += block << this->erase_shift;
+ offset = onenand_addr(this, block);
if (page != -1)
offset += page << this->page_shift;
@@ -390,6 +455,7 @@ static int __init flash_init(struct onenand_flash *flash)
}
density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density &= ONENAND_DEVICE_DENSITY_MASK;
size = ((16 << 20) << density);
ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
@@ -405,8 +471,9 @@ static int __init flash_init(struct onenand_flash *flash)
writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
+ writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
- if (density < 2)
+ if (density < 2 && (!CONFIG_FLEXONENAND))
buffer_size = 0x0400; /* 1KiB page */
else
buffer_size = 0x0800; /* 2KiB page */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 892a9e4e275f..c155bd3ec9f1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1725,6 +1725,7 @@ config TLAN
config KS8842
tristate "Micrel KSZ8842"
+ depends on HAS_IOMEM
help
This platform driver is for Micrel KSZ8842 chip.
@@ -2443,6 +2444,17 @@ config JME
To compile this driver as a module, choose M here. The module
will be called jme.
+config S6GMAC
+ tristate "S6105 GMAC ethernet support"
+ depends on XTENSA_VARIANT_S6000
+ select PHYLIB
+ help
+ This driver supports the on chip ethernet device on the
+ S6105 xtensa processor.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s6gmac.
+
endif # NETDEV_1000
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d366fb2b40e9..4b58a59f211b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -245,6 +245,7 @@ obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_MACB) += macb.o
+obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_ARM) += arm/
obj-$(CONFIG_DEV_APPLETALK) += appletalk/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 85a18175730b..08787f5a22a3 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -569,16 +569,8 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
#ifdef DEBUG_DRIVER
/* dump the packet */
- {
- int i;
-
- for (i = 0; i < 64; i++) {
- if ((i % 16) == 0)
- printk("\n" KERN_DEBUG);
- printk ("%2.2x ", skb->data [i]);
- }
- printk("\n");
- }
+ print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, 64, true);
#endif
entry = lp->tx_new & lp->tx_ring_mod_mask;
ib->btx_ring [entry].length = (-skblen) | 0xf000;
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d6d4ab3b430c..7d227cdab9f8 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -158,15 +158,12 @@ module_exit(arcnet_exit);
void arcnet_dump_skb(struct net_device *dev,
struct sk_buff *skb, char *desc)
{
- int i;
+ char hdr[32];
- printk(KERN_DEBUG "%6s: skb dump (%s) follows:", dev->name, desc);
- for (i = 0; i < skb->len; i++) {
- if (i % 16 == 0)
- printk("\n" KERN_DEBUG "[%04X] ", i);
- printk("%02X ", ((u_char *) skb->data)[i]);
- }
- printk("\n");
+ /* dump the packet */
+ snprintf(hdr, sizeof(hdr), "%6s:%s skb->data:", dev->name, desc);
+ print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
+ 16, 1, skb->data, skb->len, true);
}
EXPORT_SYMBOL(arcnet_dump_skb);
@@ -184,6 +181,7 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum,
int i, length;
unsigned long flags = 0;
static uint8_t buf[512];
+ char hdr[32];
/* hw.copy_from_card expects IRQ context so take the IRQ lock
to keep it single threaded */
@@ -197,14 +195,10 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum,
/* if the offset[0] byte is nonzero, this is a 256-byte packet */
length = (buf[2] ? 256 : 512);
- printk(KERN_DEBUG "%6s: packet dump (%s) follows:", dev->name, desc);
- for (i = 0; i < length; i++) {
- if (i % 16 == 0)
- printk("\n" KERN_DEBUG "[%04X] ", i);
- printk("%02X ", buf[i]);
- }
- printk("\n");
-
+ /* dump the packet */
+ snprintf(hdr, sizeof(hdr), "%6s:%s packet dump:", dev->name, desc);
+ print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, length, true);
}
#else
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index e4afbd628c23..607007d75b6f 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 619c6583e1aa..4003955d7a96 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b4bb06fdf307..5b4bf3d2cdc2 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define TX_CQ_LEN 1024
#define RX_Q_LEN 1024 /* Does not support any other value */
#define RX_CQ_LEN 1024
-#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
+#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
#define BE_NAPI_WEIGHT 64
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define BE_MAX_LRO_DESCRIPTORS 16
-#define BE_MAX_FRAGS_PER_FRAME 16
+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
struct be_dma_mem {
void *va;
@@ -91,6 +91,61 @@ struct be_queue_info {
atomic_t used; /* Number of valid elements in the queue */
};
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ BUG_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_adv(u16 *index, u16 val, u16 limit)
+{
+ *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
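A standalone sketch (editor's illustration, not part of the patch) of the power-of-two ring arithmetic the helpers above rely on; the BUG_ON() in MODULO() is what guarantees the mask trick below is valid.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned short head = 0;
	const unsigned short len = 128;		/* example ring length, cf. MCC_Q_LEN */

	assert((len & (len - 1)) == 0);		/* masking only works for powers of two */
	for (int i = 0; i < 130; i++)
		head = (head + 1) & (len - 1);	/* index_inc(): 127 wraps back to 0 */
	printf("head = %u\n", head);		/* prints 2, i.e. 130 mod 128 */
	return 0;
}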
+
+
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ /* Adaptive interrupt coalescing (AIC) info */
+ bool enable_aic;
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+
+ struct napi_struct napi;
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+};
+
struct be_ctrl_info {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
@@ -98,11 +153,20 @@ struct be_ctrl_info {
int pci_func;
/* Mbox used for cmd request/response */
- spinlock_t cmd_lock; /* For serializing cmds to BE card */
+ spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
struct be_dma_mem mbox_mem_alloced;
+
+ /* MCC Rings */
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ /* MCC Async callback */
+ void (*async_cb)(void *adapter, bool link_up);
+ void *adapter_ctxt;
};
#include "be_cmds.h"
@@ -150,19 +214,6 @@ struct be_stats_obj {
struct be_dma_mem cmd;
};
-struct be_eq_obj {
- struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
-
- struct napi_struct napi;
-};
-
struct be_tx_obj {
struct be_queue_info q;
struct be_queue_info cq;
@@ -225,8 +276,9 @@ struct be_adapter {
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
- struct be_link_info link;
+ bool link_up;
u32 port_num;
+ bool promiscuous;
};
extern struct ethtool_ops be_ethtool_ops;
@@ -235,22 +287,6 @@ extern struct ethtool_ops be_ethtool_ops;
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
-static inline u32 MODULO(u16 val, u16 limit)
-{
- BUG_ON(limit & (limit - 1));
- return val & (limit - 1);
-}
-
-static inline void index_adv(u16 *index, u16 val, u16 limit)
-{
- *index = MODULO((*index + val), limit);
-}
-
-static inline void index_inc(u16 *index, u16 limit)
-{
- *index = MODULO((*index + 1), limit);
-}
-
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
@@ -339,4 +375,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
return val;
}
+extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+ u16 num_popped);
#endif /* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d444aed962bc..583517ed56f0 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -17,6 +17,133 @@
#include "be.h"
+static void be_mcc_notify(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+ u32 val = 0;
+
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+ iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
+{
+ compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+ struct be_mcc_cq_entry *compl)
+{
+ u16 compl_status, extd_status;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ if (compl_status != MCC_STATUS_SUCCESS) {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ printk(KERN_WARNING DRV_NAME
+ " error in cmd completion: status(compl/extd)=%d/%d\n",
+ compl_status, extd_status);
+ return -1;
+ }
+ return 0;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_ctrl_info *ctrl,
+ struct be_async_event_link_state *evt)
+{
+ ctrl->async_cb(ctrl->adapter_ctxt,
+ evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE);
+}
+
+static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
+ struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+void be_process_mcc(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_cq_entry *compl;
+ int num = 0;
+
+ spin_lock_bh(&ctrl->mcc_cq_lock);
+ while ((compl = be_mcc_compl_get(ctrl))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ BUG_ON(!is_link_state_evt(compl->flags));
+
+ /* Interpret compl as an async link evt */
+ be_async_link_state_process(ctrl,
+ (struct be_async_event_link_state *) compl);
+ } else {
+ be_mcc_compl_process(ctrl, compl);
+ atomic_dec(&ctrl->mcc_obj.q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+ if (num)
+ be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
+ spin_unlock_bh(&ctrl->mcc_cq_lock);
+}
+
+/* Wait till no more pending mcc requests are present */
+static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
+{
+#define mcc_timeout 50000 /* 5s timeout */
+ int i;
+ for (i = 0; i < mcc_timeout; i++) {
+ be_process_mcc(ctrl);
+ if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
+ break;
+ udelay(100);
+ }
+ if (i == mcc_timeout)
+ printk(KERN_WARNING DRV_NAME ": mcc poll timed out\n");
+}
+
+/* Notify MCC requests and wait for completion */
+static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
+{
+ be_mcc_notify(ctrl);
+ be_mcc_wait_compl(ctrl);
+}
+
static int be_mbox_db_ready_wait(void __iomem *db)
{
int cnt = 0, wait = 5;
@@ -44,11 +171,11 @@ static int be_mbox_db_ready_wait(void __iomem *db)
/*
* Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
*/
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
int status;
- u16 compl_status, extd_status;
u32 val = 0;
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +206,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
if (status != 0)
return status;
- /* compl entry has been made now */
- be_dws_le_to_cpu(cqe, sizeof(*cqe));
- if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
- printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(cqe)) {
+ status = be_mcc_compl_process(ctrl, &mbox->cqe);
+ be_mcc_compl_use(cqe);
+ if (status)
+ return status;
+ } else {
+ printk(KERN_WARNING DRV_NAME ": invalid mailbox completion\n");
return -1;
}
-
- compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- if (compl_status != MCC_STATUS_SUCCESS) {
- extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
- printk(KERN_WARNING DRV_NAME
- ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
- compl_status, extd_status);
- }
-
- return compl_status;
+ return 0;
}
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +355,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
+static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
+{
+ struct be_mcc_wrb *wrb = NULL;
+ if (atomic_read(&mccq->used) < mccq->len) {
+ wrb = queue_head_node(mccq);
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ memset(wrb, 0, sizeof(*wrb));
+ }
+ return wrb;
+}
+
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay)
{
@@ -244,7 +376,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +404,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
eq->id = le16_to_cpu(resp->eq_id);
eq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -284,7 +416,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +436,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
if (!status)
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -315,7 +447,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +464,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -342,7 +474,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +486,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
req->pmac_id = cpu_to_le32(pmac_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -370,7 +502,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +520,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -399,7 +531,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+
+int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
+ AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_db_ring(ctrl);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -415,7 +596,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
int status;
u32 len_encoded;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +627,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -460,7 +641,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +663,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
rxq->id = le16_to_cpu(resp->id);
rxq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -496,7 +677,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +699,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
subsys = CMD_SUBSYSTEM_ETH;
opcode = OPCODE_ETH_RX_DESTROY;
break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
default:
printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
status = -1;
@@ -528,7 +713,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
status = be_mbox_db_ring(ctrl);
err:
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -541,7 +726,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
struct be_cmd_req_if_create *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +747,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -572,7 +757,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +768,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -598,7 +783,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
struct be_sge *sge = nonembedded_sgl(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
memset(req, 0, sizeof(*req));
@@ -617,18 +802,20 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link)
+ bool *link_up)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_link_status *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
+
+ *link_up = false;
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -639,14 +826,11 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
status = be_mbox_db_ring(ctrl);
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
- link->speed = resp->mac_speed;
- link->duplex = resp->mac_duplex;
- link->fault = resp->mac_fault;
- } else {
- link->speed = PHY_LINK_SPEED_ZERO;
+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
+ *link_up = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -656,7 +840,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +854,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -681,7 +865,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +880,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -707,7 +891,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,18 +910,22 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
+/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
- int status;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_promiscuous_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -749,21 +937,29 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
else
req->port0_promiscuous = en;
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+ return 0;
}
-int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
- u32 num, bool promiscuous)
+/*
+ * Use MCC for this command as it may be called in BH context
+ * (mc_list == NULL) => multicast promiscuous
+ */
+int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
- int status;
+#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcast_mac_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -771,17 +967,23 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
req->interface_id = if_id;
- req->promiscuous = promiscuous;
- if (!promiscuous) {
- req->num_mac = cpu_to_le16(num);
- if (num)
- memcpy(req->mac, mac_table, ETH_ALEN * num);
+ if (mc_list && mc_count <= BE_MAX_MC) {
+ int i;
+ struct dev_mc_list *mc;
+
+ req->num_mac = cpu_to_le16(mc_count);
+
+ for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
+ memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ } else {
+ req->promiscuous = 1;
}
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+
+ return 0;
}
int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
@@ -790,7 +992,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -804,7 +1006,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -814,7 +1016,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -831,7 +1033,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
*rx_fc = le16_to_cpu(resp->rx_flow_control);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -841,7 +1043,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -856,6 +1058,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
*port_num = le32_to_cpu(resp->phys_port);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
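
Illustrative aside (not part of the patch): be_encoded_q_len() added above stores a ring length as log2(len) + 1, with the value 16 wrapping to 0 to fit the hardware field. A hedged standalone illustration, using GCC's __builtin_clz() in place of the kernel's fls():

#include <stdio.h>

/* Mirrors the encoding above: fls(len) == log2(len) + 1 for power-of-two
 * lengths; an encoded value of 16 is stored as 0. */
static unsigned int encoded_q_len(unsigned int q_len)
{
	unsigned int len_encoded = 32 - __builtin_clz(q_len);	/* fls() equivalent, q_len != 0 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int main(void)
{
	unsigned int lens[] = { 64, 128, 256, 1024, 32768 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %5u -> encoded %u\n", lens[i], encoded_q_len(lens[i]));
	return 0;
}
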
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e499e2d5b8c3..747626da7b4e 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -76,6 +76,34 @@ struct be_mcc_cq_entry {
u32 flags; /* dword 3 */
};
+/* When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl are interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+struct be_async_event_trailer {
+ u32 code;
+};
+
+enum {
+ ASYNC_EVENT_LINK_DOWN = 0x0,
+ ASYNC_EVENT_LINK_UP = 0x1
+};
+
+/* When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_cq_entry cqe;
@@ -101,6 +129,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_FIRMWARE_CONFIG 42
#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
+#define OPCODE_COMMON_MCC_DESTROY 53
#define OPCODE_COMMON_CQ_DESTROY 54
#define OPCODE_COMMON_EQ_DESTROY 55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
@@ -269,6 +298,38 @@ struct be_cmd_resp_cq_create {
u16 rsvd0;
} __packed;
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 rsvd0;
+ u8 context[sizeof(struct amap_mcc_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
/******************** Create TxQ ***************************/
#define BE_ETH_TX_RING_TYPE_STANDARD 2
#define BE_ULP1_NUM 1
@@ -341,7 +402,8 @@ enum {
QTYPE_EQ = 1,
QTYPE_CQ,
QTYPE_TXQ,
- QTYPE_RXQ
+ QTYPE_RXQ,
+ QTYPE_MCCQ
};
struct be_cmd_req_q_destroy {
@@ -546,12 +608,6 @@ struct be_cmd_req_link_status {
u32 rsvd;
};
-struct be_link_info {
- u8 duplex;
- u8 speed;
- u8 fault;
-};
-
enum {
PHY_LINK_DUPLEX_NONE = 0x0,
PHY_LINK_DUPLEX_HALF = 0x1,
@@ -657,6 +713,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay,
int num_cqe_dma_coalesce);
+extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq);
extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *txq,
struct be_queue_info *cq);
@@ -667,7 +726,7 @@ extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
int type);
extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link);
+ bool *link_up);
extern int be_cmd_reset(struct be_ctrl_info *ctrl);
extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
struct be_dma_mem *nonemb_cmd);
@@ -679,10 +738,11 @@ extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id,
bool promiscuous);
extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
u8 port_num, bool en);
-extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id,
- u8 *mac_table, u32 num, bool promiscuous);
+extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count);
extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
+extern void be_process_mcc(struct be_ctrl_info *ctrl);
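
Illustrative aside (not part of the patch): amap_mcc_context above follows the driver's "pseudo amap" convention of describing each hardware bit as one byte, so offsetof()/sizeof() on the pseudo struct give a field's bit offset and bit width. A small standalone sketch of the idea; the struct and macro names here are made up for illustration:

#include <stddef.h>
#include <stdio.h>

/* One byte per hardware bit: a 32-bit context word with three fields. */
struct amap_example {
	unsigned char ring_size[4];	/* bits 0 - 3   */
	unsigned char cq_id[10];	/* bits 4 - 13  */
	unsigned char rsvd[17];		/* bits 14 - 30 */
	unsigned char valid;		/* bit 31       */
};

#define AMAP_BIT_OFFSET(s, f)	offsetof(struct s, f)
#define AMAP_BIT_WIDTH(s, f)	sizeof(((struct s *)0)->f)

int main(void)
{
	printf("ring_size: bit offset %zu, width %zu\n",
	       AMAP_BIT_OFFSET(amap_example, ring_size),
	       AMAP_BIT_WIDTH(amap_example, ring_size));
	printf("valid:     bit offset %zu, width %zu\n",
	       AMAP_BIT_OFFSET(amap_example, valid),
	       AMAP_BIT_WIDTH(amap_example, valid));
	return 0;
}
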
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9592f22e4c8c..cccc5419ad72 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
return -EINVAL;
adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
- if (adapter->max_rx_coal > MAX_SKB_FRAGS)
- adapter->max_rx_coal = MAX_SKB_FRAGS - 1;
+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
/* if AIC is being turned on now, start with an EQD of 0 */
if (rx_eq->enable_aic == 0 &&
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b132aa4893ca..29c33c709c6d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -55,13 +55,17 @@
#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */
#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26
+/********* ISR0 Register offset **********/
+#define CEV_ISR0_OFFSET 0xC18
+#define CEV_ISR_SIZE 4
+
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
-#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -88,6 +92,12 @@
/* Number of rx frags posted */
#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
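
Illustrative aside (not part of the patch): the DB_MCCQ_* definitions above describe how an MCC doorbell write is composed, with the ring id in the low bits and the number of posted entries shifted higher up. A hedged userspace model of that packing; the constants are copied from the defines above and the iowrite32() is only simulated:

#include <stdint.h>
#include <stdio.h>

#define DB_MCCQ_RING_ID_MASK		0x7FF	/* bits 0 - 10 */
#define DB_MCCQ_NUM_POSTED_SHIFT	16	/* bits 16 - 29 */

static uint32_t mccq_doorbell_val(uint16_t ring_id, uint16_t num_posted)
{
	uint32_t val = 0;

	val |= ring_id & DB_MCCQ_RING_ID_MASK;
	val |= (uint32_t)num_posted << DB_MCCQ_NUM_POSTED_SHIFT;
	return val;	/* the driver iowrite32()s this to db + DB_MCCQ_OFFSET */
}

int main(void)
{
	printf("doorbell for ring 5, 1 entry posted: 0x%08x\n",
	       (unsigned)mccq_doorbell_val(5, 1));
	return 0;
}
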
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66bb56874d9b..c43f6a119295 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
return 0;
}
-static inline void *queue_head_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->head * q->entry_size;
-}
-
-static inline void *queue_tail_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->tail * q->entry_size;
-}
-
-static inline void queue_head_inc(struct be_queue_info *q)
-{
- index_inc(&q->head, q->len);
-}
-
-static inline void queue_tail_inc(struct be_queue_info *q)
-{
- index_inc(&q->tail, q->len);
-}
-
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}
-static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
+void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
bool arm, u16 num_popped)
{
u32 val = 0;
@@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter)
dev_stats->tx_window_errors = 0;
}
-static void be_link_status_update(struct be_adapter *adapter)
+void be_link_status_update(void *ctxt, bool link_up)
{
- struct be_link_info *prev = &adapter->link;
- struct be_link_info now = { 0 };
+ struct be_adapter *adapter = ctxt;
struct net_device *netdev = adapter->netdev;
- be_cmd_link_status_query(&adapter->ctrl, &now);
-
/* If link came up or went down */
- if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
- prev->speed == PHY_LINK_SPEED_ZERO)) {
- if (now.speed == PHY_LINK_SPEED_ZERO) {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
- printk(KERN_INFO "%s: Link down\n", netdev->name);
- } else {
+ if (adapter->link_up != link_up) {
+ if (link_up) {
netif_start_queue(netdev);
netif_carrier_on(netdev);
printk(KERN_INFO "%s: Link up\n", netdev->name);
+ } else {
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ printk(KERN_INFO "%s: Link down\n", netdev->name);
}
+ adapter->link_up = link_up;
}
- *prev = now;
}
/* Update the EQ delay n BE based on the RX frags consumed / sec */
@@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
be_vid_config(netdev);
}
-static void be_set_multicast_filter(struct net_device *netdev)
+static void be_set_multicast_list(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
- u8 mac_addr[32][ETH_ALEN];
- int i = 0;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
- if (netdev->flags & IFF_ALLMULTI) {
- /* set BE in Multicast promiscuous */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, NULL, 0, true);
- return;
+ if (netdev->flags & IFF_PROMISC) {
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
+ adapter->promiscuous = true;
+ goto done;
}
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
- memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
- if (++i >= 32) {
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
- i = 0;
- }
-
+ /* BE was previously in promiscuous mode; disable it */
+ if (adapter->promiscuous) {
+ adapter->promiscuous = false;
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
}
- if (i) {
- /* reset the promiscuous mode also. */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
+ if (netdev->flags & IFF_ALLMULTI) {
+ be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
+ goto done;
}
-}
-static void be_set_multicast_list(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (netdev->flags & IFF_PROMISC) {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
- } else {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
- be_set_multicast_filter(netdev);
- }
+ be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
+ netdev->mc_count);
+done:
+ return;
}
static void be_rx_rate_update(struct be_adapter *adapter)
@@ -705,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
{
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd;
+ u16 rxq_idx, i, num_rcvd, j;
u32 pktsize, hdr_len, curr_frag_len;
u8 *start;
@@ -748,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
/* More frags present for this completion */
pktsize -= curr_frag_len; /* account for above copied frag */
- for (i = 1; i < num_rcvd; i++) {
+ for (i = 1, j = 0; i < num_rcvd; i++) {
index_inc(&rxq_idx, rxq->len);
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(pktsize, rx_frag_size);
- skb_shinfo(skb)->frags[i].page = page_info->page;
- skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
- skb_shinfo(skb)->frags[i].size = curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (page_info->page_offset == 0) {
+ /* Fresh page */
+ j++;
+ skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ skb_shinfo(skb)->nr_frags++;
+ } else {
+ put_page(page_info->page);
+ }
+
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- skb_shinfo(skb)->nr_frags++;
pktsize -= curr_frag_len;
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
@@ -825,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
struct be_queue_info *rxq = &adapter->rx_obj.q;
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
- u16 i, rxq_idx = 0, vid;
+ u16 i, rxq_idx = 0, vid, j;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
@@ -833,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
remaining = pkt_size;
- for (i = 0; i < num_rcvd; i++) {
+ for (i = 0, j = -1; i < num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
- rx_frags[i].page = page_info->page;
- rx_frags[i].page_offset = page_info->page_offset;
- rx_frags[i].size = curr_frag_len;
- remaining -= curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (i == 0 || page_info->page_offset == 0) {
+ /* First frag or Fresh page */
+ j++;
+ rx_frags[j].page = page_info->page;
+ rx_frags[j].page_offset = page_info->page_offset;
+ rx_frags[j].size = 0;
+ } else {
+ put_page(page_info->page);
+ }
+ rx_frags[j].size += curr_frag_len;
+ remaining -= curr_frag_len;
index_inc(&rxq_idx, rxq->len);
-
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
if (likely(!vlanf)) {
lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
@@ -960,10 +940,8 @@ static void be_post_rx_frags(struct be_adapter *adapter)
return;
}
-static struct be_eth_tx_compl *
-be_tx_compl_get(struct be_adapter *adapter)
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1029,59 @@ static void be_tx_q_clean(struct be_adapter *adapter)
}
}
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ q = &ctrl->mcc_obj.q;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+ be_queue_free(adapter, q);
+
+ q = &ctrl->mcc_obj.cq;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *q, *cq;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ /* Alloc MCC compl queue */
+ cq = &ctrl->mcc_obj.cq;
+ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_cq_entry)))
+ goto err;
+
+ /* Ask BE to create MCC compl queue; share TX's eq */
+ if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
+ goto mcc_cq_free;
+
+ /* Alloc MCC queue */
+ q = &ctrl->mcc_obj.q;
+ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ /* Ask BE to create MCC queue */
+ if (be_cmd_mccq_create(ctrl, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(adapter, q);
+mcc_cq_destroy:
+ be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(adapter, cq);
+err:
+ return -1;
+}
+
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
@@ -1243,15 +1274,17 @@ static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
struct be_ctrl_info *ctrl = &adapter->ctrl;
- int rx, tx;
+ int isr;
- tx = event_handle(ctrl, &adapter->tx_eq);
- rx = event_handle(ctrl, &adapter->rx_eq);
+ isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
+ ctrl->pci_func * CEV_ISR_SIZE);
+ if (!isr)
+ return IRQ_NONE;
- if (rx || tx)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
+ event_handle(ctrl, &adapter->tx_eq);
+ event_handle(ctrl, &adapter->rx_eq);
+
+ return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
@@ -1263,7 +1296,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
return IRQ_HANDLED;
}
-static irqreturn_t be_msix_tx(int irq, void *dev)
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
struct be_adapter *adapter = dev;
@@ -1324,40 +1357,51 @@ int be_poll_rx(struct napi_struct *napi, int budget)
return work_done;
}
-/* For TX we don't honour budget; consume everything */
-int be_poll_tx(struct napi_struct *napi, int budget)
+void be_process_tx(struct be_adapter *adapter)
{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_tx_obj *tx_obj = &adapter->tx_obj;
- struct be_queue_info *tx_cq = &tx_obj->cq;
- struct be_queue_info *txq = &tx_obj->q;
+ struct be_queue_info *txq = &adapter->tx_obj.q;
+ struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
u32 num_cmpl = 0;
u16 end_idx;
- while ((txcp = be_tx_compl_get(adapter))) {
+ while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp);
be_tx_compl_process(adapter, end_idx);
num_cmpl++;
}
- /* As Tx wrbs have been freed up, wake up netdev queue if
- * it was stopped due to lack of tx wrbs.
- */
- if (netif_queue_stopped(adapter->netdev) &&
+ if (num_cmpl) {
+ be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue if
+ * it was stopped due to lack of tx wrbs.
+ */
+ if (netif_queue_stopped(adapter->netdev) &&
atomic_read(&txq->used) < txq->len / 2) {
- netif_wake_queue(adapter->netdev);
+ netif_wake_queue(adapter->netdev);
+ }
+
+ drvr_stats(adapter)->be_tx_events++;
+ drvr_stats(adapter)->be_tx_compl += num_cmpl;
}
+}
+
+/* As TX and MCC share the same EQ, check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
napi_complete(napi);
- be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+ be_process_tx(adapter);
- drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += num_cmpl;
+ be_process_mcc(&adapter->ctrl);
return 1;
}
@@ -1368,9 +1412,6 @@ static void be_worker(struct work_struct *work)
container_of(work, struct be_adapter, work.work);
int status;
- /* Check link */
- be_link_status_update(adapter);
-
/* Get Stats */
status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
if (!status)
@@ -1419,7 +1460,7 @@ static int be_msix_register(struct be_adapter *adapter)
sprintf(tx_eq->desc, "%s-tx", netdev->name);
vec = be_msix_vec_get(adapter, tx_eq->q.id);
- status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
+ status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
if (status)
goto err;
@@ -1495,6 +1536,39 @@ static int be_open(struct net_device *netdev)
struct be_ctrl_info *ctrl = &adapter->ctrl;
struct be_eq_obj *rx_eq = &adapter->rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ bool link_up;
+ int status;
+
+ /* First time posting */
+ be_post_rx_frags(adapter);
+
+ napi_enable(&rx_eq->napi);
+ napi_enable(&tx_eq->napi);
+
+ be_irq_register(adapter);
+
+ be_intr_set(ctrl, true);
+
+ /* The evt queues are created in unarmed state; arm them */
+ be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
+ be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
+
+ /* Rx compl queue may be in unarmed state; rearm it */
+ be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
+
+ status = be_cmd_link_status_query(ctrl, &link_up);
+ if (status)
+ return status;
+ be_link_status_update(adapter, link_up);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+ struct net_device *netdev = adapter->netdev;
u32 if_flags;
int status;
@@ -1521,29 +1595,14 @@ static int be_open(struct net_device *netdev)
if (status != 0)
goto tx_qs_destroy;
- /* First time posting */
- be_post_rx_frags(adapter);
-
- napi_enable(&rx_eq->napi);
- napi_enable(&tx_eq->napi);
-
- be_irq_register(adapter);
-
- be_intr_set(ctrl, true);
-
- /* The evt queues are created in the unarmed state; arm them */
- be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
- be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
-
- /* The compl queues are created in the unarmed state; arm them */
- be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
- be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
-
- be_link_status_update(adapter);
+ status = be_mcc_queues_create(adapter);
+ if (status != 0)
+ goto rx_qs_destroy;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
+rx_qs_destroy:
+ be_rx_queues_destroy(adapter);
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
@@ -1552,6 +1611,19 @@ do_none:
return status;
}
+static int be_clear(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+
+ be_cmd_if_destroy(ctrl, adapter->if_handle);
+
+ be_mcc_queues_destroy(adapter);
+ return 0;
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1564,7 +1636,7 @@ static int be_close(struct net_device *netdev)
netif_stop_queue(netdev);
netif_carrier_off(netdev);
- adapter->link.speed = PHY_LINK_SPEED_ZERO;
+ adapter->link_up = false;
be_intr_set(ctrl, false);
@@ -1581,10 +1653,6 @@ static int be_close(struct net_device *netdev)
napi_disable(&rx_eq->napi);
napi_disable(&tx_eq->napi);
- be_rx_queues_destroy(adapter);
- be_tx_queues_destroy(adapter);
-
- be_cmd_if_destroy(ctrl, adapter->if_handle);
return 0;
}
@@ -1673,7 +1741,7 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
BE_NAPI_WEIGHT);
- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
+ netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
netif_carrier_off(netdev);
@@ -1755,7 +1823,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
- spin_lock_init(&ctrl->cmd_lock);
+ spin_lock_init(&ctrl->mbox_lock);
+ spin_lock_init(&ctrl->mcc_lock);
+ spin_lock_init(&ctrl->mcc_cq_lock);
+
+ ctrl->async_cb = be_link_status_update;
+ ctrl->adapter_ctxt = adapter;
val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1866,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
unregister_netdev(adapter->netdev);
+ be_clear(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -1890,13 +1965,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_netdev_init(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+ status = be_setup(adapter);
+ if (status)
+ goto stats_clean;
status = register_netdev(netdev);
if (status != 0)
- goto stats_clean;
+ goto unsetup;
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
return 0;
+unsetup:
+ be_clear(adapter);
stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
@@ -1921,6 +2001,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev)) {
rtnl_lock();
be_close(netdev);
+ be_clear(adapter);
rtnl_unlock();
}
@@ -1947,6 +2028,7 @@ static int be_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
rtnl_lock();
+ be_setup(adapter);
be_open(netdev);
rtnl_unlock();
}
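
Illustrative aside (not part of the patch): the be_main.c changes above route asynchronous link events through the async_cb/adapter_ctxt pointers registered in be_ctrl_init(), so the MCC completion path can call back into the netdev layer without knowing about struct be_adapter. A minimal standalone model of that callback wiring, with hypothetical names standing in for the driver structures:

#include <stdbool.h>
#include <stdio.h>

struct ctrl_info {
	void (*async_cb)(void *ctxt, bool link_up);
	void *adapter_ctxt;
};

struct adapter {
	bool link_up;
};

static void link_status_update(void *ctxt, bool link_up)
{
	struct adapter *adapter = ctxt;

	if (adapter->link_up != link_up) {	/* act only on a change, as above */
		printf("link %s\n", link_up ? "up" : "down");
		adapter->link_up = link_up;
	}
}

int main(void)
{
	struct adapter adapter = { .link_up = false };
	struct ctrl_info ctrl = {
		.async_cb = link_status_update,	/* registered at init time */
		.adapter_ctxt = &adapter,
	};

	/* An async link-state completion would land here: */
	ctrl.async_cb(ctrl.adapter_ctxt, true);
	ctrl.async_cb(ctrl.adapter_ctxt, true);		/* no change, no message */
	ctrl.async_cb(ctrl.adapter_ctxt, false);
	return 0;
}
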
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9578a3dfac01..206144f2470f 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -428,10 +428,11 @@ bmac_init_phy(struct net_device *dev)
printk(KERN_DEBUG "phy registers:");
for (addr = 0; addr < 32; ++addr) {
if ((addr & 7) == 0)
- printk("\n" KERN_DEBUG);
- printk(" %.4x", bmac_mif_read(dev, addr));
+ printk(KERN_DEBUG);
+ printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
}
- printk("\n");
+ printk(KERN_CONT "\n");
+
if (bp->is_bmac_plus) {
unsigned int capable, ctrl;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 38f1c3375d7f..b70cc99962fc 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev)
return 0;
}
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ return bp->link_up;
+}
+
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
@@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
.get_wol = bnx2_get_wol,
.set_wol = bnx2_set_wol,
.nway_reset = bnx2_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2_get_link,
.get_eeprom_len = bnx2_get_eeprom_len,
.get_eeprom = bnx2_get_eeprom,
.set_eeprom = bnx2_set_eeprom,
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 8678457849f9..85a737c5c23f 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -902,6 +902,8 @@ struct bnx2x {
u16 rx_quick_cons_trip;
u16 rx_ticks_int;
u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
u32 lin_cnt;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fbf1352e9c1c..c36a5f33739f 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -484,8 +484,9 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
mark = ((mark + 0x3) & ~0x3);
- printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
+ printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
+ printk(KERN_ERR PFX);
for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
@@ -500,7 +501,7 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
data[8] = 0x0;
printk(KERN_CONT "%s", (char *)data);
}
- printk("\n" KERN_ERR PFX "end of fw dump\n");
+ printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
@@ -4434,7 +4435,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
REG_WR16(bp, BAR_USTRORM_INTMEM +
USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
U_SB_ETH_RX_CQ_INDEX),
- bp->rx_ticks ? 0 : 1);
+ (bp->rx_ticks/12) ? 0 : 1);
/* HC_INDEX_C_ETH_TX_CQ_CONS */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
@@ -4444,7 +4445,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
REG_WR16(bp, BAR_CSTRORM_INTMEM +
CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
C_SB_ETH_TX_CQ_INDEX),
- bp->tx_ticks ? 0 : 1);
+ (bp->tx_ticks/12) ? 0 : 1);
}
}
@@ -7354,7 +7355,7 @@ static void bnx2x_reset_task(struct work_struct *work)
#ifdef BNX2X_STOP_ON_ERROR
BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
" so reset not done to allow debug dump,\n"
- KERN_ERR " you will need to reboot when done\n");
+ " you will need to reboot when done\n");
return;
#endif
@@ -8637,6 +8638,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
return 0;
}
+static u32
+bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->link_vars.link_up;
+}
+
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -9061,12 +9070,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
- if (bp->rx_ticks > 3000)
- bp->rx_ticks = 3000;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
- if (bp->tx_ticks > 0x3000)
- bp->tx_ticks = 0x3000;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
if (netif_running(dev))
bnx2x_update_coalesce(bp);
@@ -10034,7 +10043,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
.nway_reset = bnx2x_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2x_get_link,
.get_eeprom_len = bnx2x_get_eeprom_len,
.get_eeprom = bnx2x_get_eeprom,
.set_eeprom = bnx2x_set_eeprom,
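
Illustrative aside (not part of the patch): the coalescing hunks above clamp the user-supplied timeout to BNX2X_MAX_COALESCE_TOUT and treat values below one 12 us unit as "disable the timer" (the rx_ticks/12 and tx_ticks/12 tests). A hedged standalone sketch of that conversion, assuming the 12 us granularity implied by the divide; the constant is copied from the bnx2x.h change above:

#include <stdio.h>

#define BNX2X_MAX_COALESCE_TOUT	(0xf0 * 12)	/* maximal coalescing timeout in us */

/* Clamp a requested timeout (in us) and report the resulting 12 us unit
 * count, and whether the HC timer would be disabled (under one unit). */
static void coalesce_params(unsigned int usecs)
{
	if (usecs > BNX2X_MAX_COALESCE_TOUT)
		usecs = BNX2X_MAX_COALESCE_TOUT;

	printf("%4u us -> %3u units, timer %s\n", usecs, usecs / 12,
	       (usecs / 12) ? "enabled" : "disabled");
}

int main(void)
{
	coalesce_params(6);	/* under one unit: disabled */
	coalesce_params(120);
	coalesce_params(5000);	/* clamped to 2880 us */
	return 0;
}
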
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5e18812bf49..33821a81cbf8 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING
If unsure, say Y.
config CAN_SJA1000
- depends on CAN_DEV
+ depends on CAN_DEV && HAS_IOMEM
tristate "Philips SJA1000"
---help---
Driver for the SJA1000 CAN controllers from Philips or NXP
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 44f77eb1180f..4d1515f45ba2 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -25,8 +25,6 @@
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/module.h>
-
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
@@ -2521,9 +2519,9 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
struct cnic_dev *cdev;
struct cnic_local *cp;
struct cnic_eth_dev *ethdev = NULL;
- struct cnic_eth_dev *(*probe)(void *) = NULL;
+ struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
- probe = __symbol_get("bnx2_cnic_probe");
+ probe = symbol_get(bnx2_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
symbol_put_addr(probe);
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 06380963a34e..d1bce27ee99e 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -296,4 +296,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
extern int cnic_unregister_driver(int ulp_type);
+extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
+
#endif
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 58afafbd3b9c..fd5e32cbcb87 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_multicast_list = cpmac_set_multicast_list,
- .ndo_so_ioctl = cpmac_ioctl,
+ .ndo_do_ioctl = cpmac_ioctl,
.ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 538dda4422dc..fb5df5c6203e 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -642,8 +642,7 @@ static int setup_sge_qsets(struct adapter *adap)
struct port_info *pi = netdev_priv(dev);
pi->qs = &adap->sge.qs[pi->first_qset];
- for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
- ++j, ++qset_idx) {
+ for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 :
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2df8fb0af701..12fd446f9895 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1820,11 +1820,19 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
struct device *emac_dev = &priv->ndev->dev;
struct sockaddr *sa = addr;
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
- memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
- emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
+
+ /* If the interface is down - rxch is NULL. */
+ /* MAC address is configured only after the interface is enabled. */
+ if (netif_running(ndev)) {
+ memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
+ emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
+ }
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
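
Illustrative aside (not part of the patch): the davinci_emac change above rejects invalid MAC addresses up front and only programs the hardware when the interface is running. A hedged standalone check along the lines of the kernel's is_valid_ether_addr() (simplified here to: not all-zero and not a multicast/broadcast address):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static bool ether_addr_valid(const uint8_t *addr)
{
	bool all_zero = true;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		if (addr[i])
			all_zero = false;

	/* multicast/broadcast addresses have the low bit of the first octet set */
	return !all_zero && !(addr[0] & 0x01);
}

int main(void)
{
	uint8_t unicast[ETH_ALEN]   = { 0x00, 0x0c, 0x29, 0x12, 0x34, 0x56 };
	uint8_t broadcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("unicast  : %s\n", ether_addr_valid(unicast) ? "valid" : "invalid");
	printf("broadcast: %s\n", ether_addr_valid(broadcast) ? "valid" : "invalid");
	return 0;
}
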
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 895d72143ee0..4b6a219fecea 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -268,8 +268,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_INFO "tx_coalesce:\t%d packets\n",
tx_coalesce);
if (np->coalesce)
- printk(KERN_INFO "rx_coalesce:\t%d packets\n"
- KERN_INFO "rx_timeout: \t%d ns\n",
+ printk(KERN_INFO
+ "rx_coalesce:\t%d packets\n"
+ "rx_timeout: \t%d ns\n",
np->rx_coalesce, np->rx_timeout*640);
if (np->vlan)
printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
@@ -1522,9 +1523,9 @@ mii_get_media (struct net_device *dev)
printk (KERN_INFO "Operating at 10 Mbps, ");
}
if (bmcr & MII_BMCR_DUPLEX_MODE) {
- printk ("Full duplex\n");
+ printk (KERN_CONT "Full duplex\n");
} else {
- printk ("Half duplex\n");
+ printk (KERN_CONT "Half duplex\n");
}
}
if (np->tx_flow)
@@ -1614,9 +1615,9 @@ mii_set_media (struct net_device *dev)
}
if (np->full_duplex) {
bmcr |= MII_BMCR_DUPLEX_MODE;
- printk ("Full duplex\n");
+ printk (KERN_CONT "Full duplex\n");
} else {
- printk ("Half duplex\n");
+ printk (KERN_CONT "Half duplex\n");
}
#if 0
/* Set 1000BaseT Master/Slave setting */
@@ -1669,9 +1670,9 @@ mii_get_media_pcs (struct net_device *dev)
__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
printk (KERN_INFO "Operating at 1000 Mbps, ");
if (bmcr & MII_BMCR_DUPLEX_MODE) {
- printk ("Full duplex\n");
+ printk (KERN_CONT "Full duplex\n");
} else {
- printk ("Half duplex\n");
+ printk (KERN_CONT "Half duplex\n");
}
}
if (np->tx_flow)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e3356f8eb5a..5b8cbdb4b520 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->skb) {
+ if (buffer_info->dma) {
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ }
+
+ buffer_info->dma = 0;
+ if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
break; /* while !buffer_info->skb */
}
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);
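
Illustrative aside (not part of the patch): the e1000 hunks above key the pci_unmap_single() call off buffer_info->dma rather than the skb pointer and clear the handle immediately after unmapping, so the same DMA mapping cannot be released twice. A small standalone model of that bookkeeping; the unmap itself is only simulated:

#include <stdint.h>
#include <stdio.h>

struct buffer_info {
	uint64_t dma;		/* 0 means "not currently mapped" */
	unsigned int length;
};

static void unmap_buffer(struct buffer_info *bi)
{
	if (bi->dma) {
		/* the driver would call pci_unmap_single(pdev, bi->dma, bi->length, ...) here */
		printf("unmapping dma 0x%llx (%u bytes)\n",
		       (unsigned long long)bi->dma, bi->length);
		bi->dma = 0;	/* guards against a second unmap of the same buffer */
	}
}

int main(void)
{
	struct buffer_info bi = { .dma = 0xdead000, .length = 2048 };

	unmap_buffer(&bi);	/* unmaps once */
	unmap_buffer(&bi);	/* no-op: handle already cleared */
	return 0;
}
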
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 8890c97e1120..c0f185beb8bc 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -238,6 +238,7 @@
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
/* Constants used to interpret the masked PCI-X bus speed. */
@@ -575,6 +576,8 @@
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 163c1c0cfee7..fd44d9f90769 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -215,6 +215,7 @@ enum e1e_registers {
E1000_SWSM = 0x05B50, /* SW Semaphore */
E1000_FWSM = 0x05B54, /* FW Semaphore */
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
+ E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */
E1000_HICR = 0x08F00, /* Host Interface Control */
};
@@ -302,6 +303,9 @@ enum e1e_registers {
#define E1000_KMRNCTRLSTA_REN 0x00200000
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
+#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e23f50fb9cd..d56c7473144a 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,6 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ union ich8_hws_flash_status hsfsts;
u32 gfpreg;
u32 sector_base_addr;
u32 sector_end_addr;
@@ -374,6 +375,20 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
+ /*
+ * Make sure the flash bank size does not overwrite the 4k
+ * sector ranges. We may have 64k allotted to us but we only care
+ * about the first 2 4k sectors. Therefore, if we have anything less
+ * than 64k set in the HSFSTS register, we will reduce the bank size
+ * down to 4k and let the rest remain unused. If berasesz == 3, then
+ * we are working in 64k mode. Otherwise we are not.
+ */
+ if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.berasesz != 3)
+ nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
+ }
+
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
/* Clear shadow ram */
@@ -446,6 +461,95 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
return 0;
}
+/**
+ * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = 0;
+ goto out;
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ E1000_KMRNCTRLSTA_K1_ENABLE);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000e_check_downshift(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ e1000e_config_collision_dist(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg(hw, "Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -694,6 +798,38 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check the appropriate indication the MAC has finished configuring the
+ * PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+ u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+ /* Wait for basic configuration to complete before proceeding */
+ do {
+ data = er32(STATUS);
+ data &= E1000_STATUS_LAN_INIT_DONE;
+ udelay(100);
+ } while ((!data) && --loop);
+
+ /*
+ * If basic configuration is incomplete before the above loop
+ * count reaches 0, loading the configuration from NVM will
+ * leave the PHY in a bad state possibly resulting in no link.
+ */
+ if (loop == 0)
+ hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
+
+ /* Clear the Init Done bit for the next init event */
+ data = er32(STATUS);
+ data &= ~E1000_STATUS_LAN_INIT_DONE;
+ ew32(STATUS, data);
+}
+
+/**
* e1000_phy_hw_reset_ich8lan - Performs a PHY reset
* @hw: pointer to the HW structure
*
@@ -707,13 +843,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
u32 i;
u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val;
- u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
ret_val = e1000e_phy_hw_reset_generic(hw);
if (ret_val)
return ret_val;
+ /* Allow time for h/w to get to a quiescent state after reset */
+ mdelay(10);
+
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
@@ -741,26 +879,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
if (!(data & sw_cfg_mask))
return 0;
- /* Wait for basic configuration completes before proceeding*/
- do {
- data = er32(STATUS);
- data &= E1000_STATUS_LAN_INIT_DONE;
- udelay(100);
- } while ((!data) && --loop);
-
- /*
- * If basic configuration is incomplete before the above loop
- * count reaches 0, loading the configuration from NVM will
- * leave the PHY in a bad state possibly resulting in no link.
- */
- if (loop == 0) {
- hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
- }
-
- /* Clear the Init Done bit for the next init event */
- data = er32(STATUS);
- data &= ~E1000_STATUS_LAN_INIT_DONE;
- ew32(STATUS, data);
+ /* Wait for basic configuration to complete before proceeding */
+ e1000_lan_init_done_ich8lan(hw);
/*
* Make sure HW does not configure LCD from PHY
@@ -961,12 +1081,14 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
- if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3))
+ if (hw->mac.type == e1000_ich8lan)
e1000e_gig_downshift_workaround_ich8lan(hw);
/* When LPLU is enabled, we should disable SmartSpeed */
@@ -979,6 +1101,9 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
@@ -1038,6 +1163,10 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (!active) {
phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
@@ -1073,12 +1202,14 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
- if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3))
+ if (hw->mac.type == e1000_ich8lan)
e1000e_gig_downshift_workaround_ich8lan(hw);
/* When LPLU is enabled, we should disable SmartSpeed */
@@ -1905,7 +2036,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
break;
case 1:
sector_size = ICH_FLASH_SEG_SIZE_4K;
- iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
break;
case 2:
if (hw->mac.type == e1000_ich9lan) {
@@ -1917,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
break;
case 3:
sector_size = ICH_FLASH_SEG_SIZE_64K;
- iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
break;
default:
return -E1000_ERR_NVM;
@@ -2143,6 +2274,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!e1000_check_reset_block(hw)) {
+ /* Clear PHY Reset Asserted bit */
+ if (hw->mac.type >= e1000_pchlan) {
+ u32 status = er32(STATUS);
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ }
+
/*
* PHY HW reset requires MAC CORE reset at the same
* time to make sure the interface between MAC and the
@@ -2156,23 +2293,34 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(CTRL, (ctrl | E1000_CTRL_RST));
msleep(20);
- if (!ret_val) {
- /* release the swflag because it is not reset by
- * hardware reset
- */
+ if (!ret_val)
e1000_release_swflag_ich8lan(hw);
- }
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- hw_dbg(hw, "Auto Read Done did not complete\n");
+ if (ctrl & E1000_CTRL_PHY_RST)
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ hw_dbg(hw, "Auto Read Done did not complete\n");
+ }
}
+ /*
+ * For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ ew32(CRC_OFFSET, 0x65656565);
+
ew32(IMC, 0xffffffff);
icr = er32(ICR);
@@ -2222,6 +2370,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /*
+ * The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Reading the BM_WUC register will clear the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ hw->phy.ops.read_phy_reg(hw, BM_WUC, &i);
+ ret_val = e1000_phy_hw_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Setup link and flow control */
ret_val = e1000_setup_link_ich8lan(hw);
@@ -2254,16 +2414,6 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
ew32(CTRL_EXT, ctrl_ext);
/*
- * The 82578 Rx buffer will stall if wakeup is enabled in host and
- * the ME. Reading the BM_WUC register will clear the host wakeup bit.
- * Reset the phy after disabling host wakeup to reset the Rx buffer.
- */
- if (hw->phy.type == e1000_phy_82578) {
- e1e_rphy(hw, BM_WUC, &i);
- e1000e_phy_hw_reset_generic(hw);
- }
-
- /*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
@@ -2485,6 +2635,14 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
if (ret_val)
return ret_val;
+ if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ E1000_KMRNCTRLSTA_K1_DISABLE);
+ if (ret_val)
+ return ret_val;
+ }
+
if ((hw->mac.type == e1000_ich8lan) &&
(hw->phy.type == e1000_phy_igp_3) &&
(*speed == SPEED_1000)) {
@@ -2850,6 +3008,16 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
u32 bank = 0;
+ if (hw->mac.type >= e1000_pchlan) {
+ u32 status = er32(STATUS);
+
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ hw_dbg(hw,
+ "PHY Reset Asserted not set - needs delay\n");
+ }
+
e1000e_get_cfg_done(hw);
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
@@ -2921,7 +3089,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
static struct e1000_mac_operations ich8_mac_ops = {
.id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000_check_mng_mode_ich8lan,
- .check_for_link = e1000e_check_for_copper_link,
+ .check_for_link = e1000_check_for_copper_link_ich8lan,
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
.get_bus_info = e1000_get_bus_info_ich8lan,
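
The new e1000_lan_init_done_ich8lan() helper above lifts the LAN_INIT_DONE polling loop out of the PHY reset path so the ICH10+/PCH reset path can reuse it. For readers unfamiliar with the idiom, the following stand-alone C sketch mirrors the shape of that poll-with-timeout loop; read_status(), delay_us() and wait_for_bit() are invented stand-ins for er32(STATUS), udelay() and the helper, not driver API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for er32(STATUS) and udelay(); purely illustrative. */
static uint32_t fake_status;
static uint32_t read_status(void) { return fake_status; }
static void delay_us(unsigned us) { (void)us; }

/* Poll until (status & mask) becomes non-zero or the loop count runs out. */
static bool wait_for_bit(uint32_t mask, unsigned loops)
{
	uint32_t data;

	do {
		data = read_status() & mask;
		delay_us(100);
	} while (!data && --loops);

	return data != 0;	/* false: timed out, caller logs and moves on */
}

int main(void)
{
	fake_status = 0x200;			/* pretend LAN_INIT_DONE is already set */
	printf("init done: %d\n", wait_for_bit(0x200, 640));
	return 0;
}

The decrement-and-test in the while condition bounds the wait to roughly loops * 100 microseconds, which is the same budget the driver expresses through E1000_ICH8_LAN_INIT_TIMEOUT.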
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index be6d9e990374..99ba2b8a2a05 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -378,12 +378,6 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
mac->get_link_status = 0;
- if (hw->phy.type == e1000_phy_82578) {
- ret_val = e1000_link_stall_workaround_hv(hw);
- if (ret_val)
- return ret_val;
- }
-
/*
* Check if there was DownShift, must be checked
* immediately after link-up
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 677f60490f67..63415bb6f48f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1997,7 +1997,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
struct e1000_hw *hw = &adapter->hw;
struct net_device *poll_dev = adapter->netdev;
- int tx_cleaned = 0, work_done = 0;
+ int tx_cleaned = 1, work_done = 0;
adapter = netdev_priv(poll_dev);
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000e_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index e23459cf3d0e..994401fd0664 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1531,7 +1531,12 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
*/
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
if (ret_val)
- break;
+ /*
+ * If the first read fails, another entity may have
+ * ownership of the resources; wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ udelay(usec_interval);
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
@@ -2737,6 +2742,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_82578)
goto out;
+ /* Do not apply the workaround if PHY loopback (bit 14) is set */
+ hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data);
+ if (data & PHY_CONTROL_LB)
+ goto out;
+
/* check if link is up and at 1Gbps */
ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
if (ret_val)
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index b60e27dfcfa7..88d7ebf31220 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -338,8 +338,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
#ifndef MODULE
static int printed_version;
if (!printed_version++)
- printk (KERN_INFO "%s" KERN_INFO "%s",
- version, version2);
+ printk(KERN_INFO "%s%s", version, version2);
#endif
card_idx++;
@@ -1600,7 +1599,7 @@ static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk (KERN_INFO "%s" KERN_INFO "%s",
+ printk (KERN_INFO "%s%s",
version, version2);
#endif
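
The epic100 change above, and the fealnx, hamachi, baycom, natsemi, ne and pci-skeleton hunks that follow, all remove printk level prefixes that had been concatenated into the middle of a format string. A level macro such as KERN_INFO is just a string literal; printk only interprets it at the very start of a message, so an embedded one ends up as literal text in the log. A minimal user-space demonstration of where the bytes land (FAKE_KERN_INFO's "<6>" value is an assumption standing in for the real macro):

#include <stdio.h>

/* Assumed value: the classic syslog level prefix KERN_INFO expanded to at the
 * time ("<6>"); used here only to show the effect of string concatenation. */
#define FAKE_KERN_INFO "<6>"

int main(void)
{
	/* Old form: the second prefix is glued into the middle of the format. */
	printf(FAKE_KERN_INFO "%s" FAKE_KERN_INFO "%s", "line one\n", "line two\n");

	/* Fixed form: one prefix, both strings follow it. */
	printf(FAKE_KERN_INFO "%s%s", "line one\n", "line two\n");
	return 0;
}

In the kernel the stray "<6>" in the first form would appear verbatim in the middle of the logged message instead of starting a new one, which is exactly what these hunks clean up.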
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 891be28a7d4f..48385c42ab57 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1209,17 +1209,20 @@ static void fealnx_tx_timeout(struct net_device *dev)
unsigned long flags;
int i;
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, ioread32(ioaddr + ISR));
+ printk(KERN_WARNING
+ "%s: Transmit timed out, status %8.8x, resetting...\n",
+ dev->name, ioread32(ioaddr + ISR));
{
printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
- printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ printk(KERN_CONT " %8.8x",
+ (unsigned int) np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %4.4x", np->tx_ring[i].status);
- printk("\n");
+ printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
}
spin_lock_irqsave(&np->lock, flags);
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 30b7dd671336..cc47f3f057c7 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -46,12 +46,12 @@
#else
-#define FEC_ECNTRL; 0x000 /* Ethernet control reg */
-#define FEC_IEVENT; 0x004 /* Interrupt even reg */
-#define FEC_IMASK; 0x008 /* Interrupt mask reg */
-#define FEC_IVEC; 0x00c /* Interrupt vec status reg */
-#define FEC_R_DES_ACTIVE; 0x010 /* Receive descriptor reg */
-#define FEC_X_DES_ACTIVE; 0x01c /* Transmit descriptor reg */
+#define FEC_ECNTRL 0x000 /* Ethernet control reg */
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_IVEC 0x00c /* Interrupt vec status reg */
+#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
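
Besides correcting the FEC_X_DES_ACTIVE offset (0x01c -> 0x014), the fec.h hunk removes a stray ';' glued to each macro name, which made every macro expand to "; 0x..." and broke any arithmetic use of it. A small stand-alone illustration of that preprocessor pitfall (GOOD_REG and BAD_REG are invented names; most compilers accept the bad define with only a missing-whitespace warning):

#include <stdio.h>

#define GOOD_REG   0x010	/* expands to just the value               */
#define BAD_REG;   0x010	/* expands to "; 0x010" (note the ';')     */

int main(void)
{
	printf("%#x\n", (unsigned)(0x1000 + GOOD_REG));	/* fine */
	/* printf("%#x\n", (unsigned)(0x1000 + BAD_REG));
	 * would not compile: the line expands to (0x1000 + ; 0x010). */
	return 0;
}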
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 1094d292630f..3b4e0766c7b2 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3514,11 +3514,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
nv_msi_workaround(np);
#ifdef CONFIG_FORCEDETH_NAPI
- napi_schedule(&np->napi);
-
- /* Disable furthur irq's
- (msix not enabled with napi) */
- writel(0, base + NvRegIrqMask);
+ if (napi_schedule_prep(&np->napi)) {
+ /*
+ * Disable further IRQs (msix not enabled with napi)
+ */
+ writel(0, base + NvRegIrqMask);
+ __napi_schedule(&np->napi);
+ }
#else
do
@@ -3615,12 +3617,13 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
nv_msi_workaround(np);
#ifdef CONFIG_FORCEDETH_NAPI
- napi_schedule(&np->napi);
-
- /* Disable furthur irq's
- (msix not enabled with napi) */
- writel(0, base + NvRegIrqMask);
-
+ if (napi_schedule_prep(&np->napi)) {
+ /*
+ * Disable further IRQs (msix not enabled with napi)
+ */
+ writel(0, base + NvRegIrqMask);
+ __napi_schedule(&np->napi);
+ }
#else
do
{
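
The forcedeth hunks above close a race in the NAPI path: the old code masked the device interrupts even when napi_schedule() was a no-op because a poll was already in flight, and if that poll had just re-enabled interrupts on completion, the unconditional writel could mask them again with no poll queued to ever unmask them. napi_schedule_prep() atomically claims the poll, and only when that claim succeeds is it safe to mask the IRQ and queue the poll with __napi_schedule(). A kernel-context sketch of the idiom, not compilable on its own (np, base and NvRegIrqMask as in the driver):

	if (napi_schedule_prep(&np->napi)) {
		/* We now own the NAPI poll: mask device IRQs, then queue it. */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}
	/* else: a poll is already scheduled or running and will do the work. */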
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3af581303ca2..d167090248e2 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
}
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
{
struct gfar __iomem *enet_regs;
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
#endif
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
struct device_node *np = NULL;
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs);
#else
err = -ENODEV;
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 9d5b62cb30f7..d62378cbc149 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -173,8 +173,8 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static const char version[] __devinitconst =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
-KERN_INFO " Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
-KERN_INFO " Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
+" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
+" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
/* IP_MF appears to be only defined in <netinet/ip.h>, however,
@@ -1080,11 +1080,14 @@ static void hamachi_tx_timeout(struct net_device *dev)
{
printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", le32_to_cpu(hmp->rx_ring[i].status_n_length));
- printk("\n"KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
+ printk(KERN_CONT " %8.8x",
+ le32_to_cpu(hmp->rx_ring[i].status_n_length));
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %4.4x", le32_to_cpu(hmp->tx_ring[i].status_n_length));
- printk("\n");
+ printk(KERN_CONT " %4.4x",
+ le32_to_cpu(hmp->tx_ring[i].status_n_length));
+ printk(KERN_CONT "\n");
}
/* Reinit the hardware and make sure the Rx and Tx processes
@@ -1753,13 +1756,13 @@ static int hamachi_close(struct net_device *dev)
#ifdef __i386__
if (hamachi_debug > 2) {
- printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ printk(KERN_DEBUG " Tx ring at %8.8x:\n",
(int)hmp->tx_ring_dma);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %c #%d desc. %8.8x %8.8x.\n",
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n",
readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
- printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)hmp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n",
@@ -1770,7 +1773,7 @@ static int hamachi_close(struct net_device *dev)
u16 *addr = (u16 *)
hmp->rx_skbuff[i]->data;
int j;
-
+ printk(KERN_DEBUG "Addr: ");
for (j = 0; j < 0x50; j++)
printk(" %4.4x", addr[j]);
printk("\n");
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 5e4b7afd0683..352703255bba 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -68,7 +68,7 @@ static const char paranoia_str[] = KERN_ERR
static const char bc_drvname[] = "baycom_epp";
static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-KERN_INFO "baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n";
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 2e6fc4dc74b1..5f5af9a606f8 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -102,7 +102,7 @@
static const char bc_drvname[] = "baycom_par";
static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-KERN_INFO "baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n";
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index b6a816e60c0f..aa4488e871b2 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -91,7 +91,7 @@
static const char bc_drvname[] = "baycom_ser_fdx";
static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-KERN_INFO "baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 3bcc57acbe6d..88c593596020 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -79,7 +79,7 @@
static const char bc_drvname[] = "baycom_ser_hdx";
static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-KERN_INFO "baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index efd9be214885..ac28dd5a4fd1 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -190,6 +190,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.write_reg = igb_write_phy_reg_igp;
}
+ /* set lan id */
+ hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+ E1000_STATUS_FUNC_SHIFT;
+
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ea17319624aa..be480292aba1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
cleaned = true;
cleaned_count++;
+ /* this is the fast path for the non-packet split case */
if (!adapter->rx_ps_hdr_size) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_buffer_len +
- NET_IP_ALIGN,
+ adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
if (!skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_ps_hdr_size + NET_IP_ALIGN,
+ adapter->rx_ps_hdr_size,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
bufsz = adapter->rx_ps_hdr_size;
else
bufsz = adapter->rx_buffer_len;
- bufsz += NET_IP_ALIGN;
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
}
if (!buffer_info->skb) {
- skb = netdev_alloc_skb(netdev, bufsz);
+ skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
igb_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f3eed6a8fba5..911c082cee5a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
return 0;
}
+static const struct net_device_ops bfin_sir_ndo = {
+ .ndo_open = bfin_sir_open,
+ .ndo_stop = bfin_sir_stop,
+ .ndo_start_xmit = bfin_sir_hard_xmit,
+ .ndo_do_ioctl = bfin_sir_ioctl,
+ .ndo_get_stats = bfin_sir_stats,
+};
+
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
if (err)
goto err_mem_3;
- dev->hard_start_xmit = bfin_sir_hard_xmit;
- dev->open = bfin_sir_open;
- dev->stop = bfin_sir_stop;
- dev->do_ioctl = bfin_sir_ioctl;
- dev->get_stats = bfin_sir_stats;
- dev->irq = sir_port->irq;
+ dev->netdev_ops = &bfin_sir_ndo;
+ dev->irq = sir_port->irq;
irda_init_max_qos_capabilies(&self->qos);
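
The bfin_sir hunk above is a routine conversion to the net_device_ops model: instead of assigning callbacks onto each net_device instance, the driver points dev->netdev_ops at a single shared, const table. A generic stand-alone C illustration of the pattern (the struct names and callbacks here are invented for the demo, not networking API):

#include <stdio.h>

struct demo_ops {
	void (*open)(void);
	void (*stop)(void);
};

static void demo_open(void) { puts("open"); }
static void demo_stop(void) { puts("stop"); }

/* One read-only table shared by every device instance. */
static const struct demo_ops demo_netdev_ops = {
	.open = demo_open,
	.stop = demo_stop,
};

struct demo_device {
	const struct demo_ops *ops;
	int irq;
};

int main(void)
{
	struct demo_device dev = { .ops = &demo_netdev_ops, .irq = 42 };

	dev.ops->open();
	dev.ops->stop();
	return 0;
}

Keeping the table const and shared lets it live in read-only memory and gives a single place to audit the driver's entry points, which is the point of the netdev_ops conversion.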
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index d56890f5c9d5..7c5978ad929a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -138,6 +138,10 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->hw.fc.requested_mode = ixgbe_fc_none;
}
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ }
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
@@ -154,6 +158,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 86f4f3e36f27..2a978008fd6e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
s32 err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
/* 10000/copper and 1000/copper must autoneg
* this function does not support any duplex forcing, but can
* limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
} else {
/* in this case we currently only support 10Gb/FULL */
if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
(ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -1829,7 +1830,6 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
break;
default:
wol->supported = 0;
- retval = 0;
}
return retval;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e756e220db32..e3442f47f932 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb = netdev_alloc_skb(adapter->netdev,
+ (rx_ring->rx_buf_len +
+ NET_IP_ALIGN));
if (!skb) {
adapter->alloc_rx_buff_failed++;
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
skb_reserve(skb, NET_IP_ALIGN);
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data, bufsz,
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
pci_unmap_single(pdev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
skb_put(skb, len);
}
@@ -2694,16 +2697,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/*
* For hot-pluggable SFP+ devices, a new SFP+ module may have
- * arrived before interrupts were enabled. We need to kick off
- * the SFP+ module setup first, then try to bring up link.
+ * arrived before interrupts were enabled but after probe. Such
+ * devices wouldn't have their type identified yet. We need to
+ * kick off the SFP+ module setup first, then try to bring up link.
* If we're not hot-pluggable SFP+, we just need to configure link
* and bring it up.
*/
- err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
- ixgbe_down(adapter);
- return err;
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ err = hw->phy.ops.identify(hw);
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ /*
+ * Take the device down and schedule the sfp tasklet
+ * which will unregister_netdev and log it.
+ */
+ ixgbe_down(adapter);
+ schedule_work(&adapter->sfp_config_module_task);
+ return err;
+ }
}
if (ixgbe_is_sfp(hw)) {
@@ -2812,9 +2822,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- rx_buffer_info->page_dma = 0;
+ if (rx_buffer_info->page_dma) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ }
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
rx_buffer_info->page_offset = 0;
@@ -3118,7 +3130,11 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
- ixgbe_set_rss_queues(adapter);
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ ixgbe_set_fdir_queues(adapter);
+ else
+ ixgbe_set_rss_queues(adapter);
}
/* adding FCoE rx rings to the end */
f->mask = adapter->num_rx_queues;
@@ -3376,7 +3392,12 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- ixgbe_cache_ring_rss(adapter);
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);
+
fcoe_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_i++)
@@ -3716,14 +3737,15 @@ static void ixgbe_sfp_task(struct work_struct *work)
if ((hw->phy.type == ixgbe_phy_nl) &&
(hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
s32 ret = hw->phy.ops.identify_sfp(hw);
- if (ret)
+ if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
goto reschedule;
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "failed to initialize because an "
- "unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a "
- "supported module.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize "
+ "because an unsupported SFP+ module type "
+ "was detected.\n"
+ "Reload the driver after installing a "
+ "supported module.\n");
unregister_netdev(adapter->netdev);
} else {
DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -4502,7 +4524,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
u32 autoneg;
adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
- if (hw->mac.ops.get_link_capabilities)
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg,
&hw->mac.autoneg);
if (hw->mac.ops.setup_link_speed)
@@ -4524,10 +4547,17 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
u32 err;
adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
+
+ /* Time for electrical oscillations to settle down */
+ msleep(100);
err = hw->phy.ops.identify_sfp(hw);
+
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
- ixgbe_down(adapter);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
+ unregister_netdev(adapter->netdev);
return;
}
hw->mac.ops.setup_sfp(hw);
@@ -5513,8 +5543,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
round_jiffies(jiffies + (2 * HZ)));
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to load because an "
- "unsupported SFP+ module type was detected.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
goto err_sw_init;
} else if (err) {
dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
@@ -5555,12 +5587,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_FCOE_CRC;
netdev->features |= NETIF_F_FSO;
netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
- DPRINTK(DRV, INFO, "FCoE enabled, "
- "disabling Flow Director\n");
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags &=
- ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- adapter->atr_sample_rate = 0;
} else {
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
}
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 453e966762f0..9ecad17522c3 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -60,6 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.mdio.prtad = phy_addr;
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
ixgbe_get_phy_id(hw);
hw->phy.type =
@@ -68,6 +69,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
break;
}
}
+ /* clear value if nothing found */
+ hw->phy.mdio.prtad = 0;
} else {
status = 0;
}
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index dc45e9856c35..6851bdb2ce29 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -14,6 +14,10 @@
#include <linux/mdio.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers");
+MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc.");
+MODULE_LICENSE("GPL");
+
/**
* mdio45_probe - probe for an MDIO (clause 45) device
* @mdio: MDIO interface
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index e02bafdd3682..93f4abd990a9 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -668,7 +668,7 @@ int mlx4_en_start_port(struct net_device *dev)
queue_work(mdev->workqueue, &priv->mcast_task);
priv->port_up = true;
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
mac_err:
@@ -700,14 +700,14 @@ void mlx4_en_stop_port(struct net_device *dev)
en_dbg(DRV, priv, "stop port called while port already down\n");
return;
}
- netif_stop_queue(dev);
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
- priv->port_up = false;
+ netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
/* close port*/
+ priv->port_up = false;
mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Unregister Mac address for the port */
@@ -881,7 +881,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
cancel_delayed_work(&priv->stats_task);
- cancel_delayed_work(&priv->refill_task);
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
@@ -986,7 +985,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
- INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 5a14899c1e25..91bdfdfd431f 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -269,31 +269,6 @@ reduce_rings:
return 0;
}
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
- struct mlx4_en_rx_ring *ring)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int num = 0;
- int err;
-
- while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
- ring->size_mask);
- if (err) {
- if (netif_msg_rx_err(priv))
- en_warn(priv, "Failed preparing rx descriptor\n");
- priv->port_stats.rx_alloc_failed++;
- break;
- }
- ++num;
- ++ring->prod;
- }
- if ((u32) (ring->prod - ring->cons) == ring->actual_size)
- ring->full = 1;
-
- return num;
-}
-
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
@@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
}
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
- struct delayed_work *delay = to_delayed_work(work);
- struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
- refill_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_rx_ring *ring;
- int need_refill = 0;
- int i;
-
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up || !priv->port_up)
- goto out;
-
- /* We only get here if there are no receive buffers, so we can't race
- * with Rx interrupts while filling buffers */
- for (i = 0; i < priv->rx_ring_num; i++) {
- ring = &priv->rx_ring[i];
- if (ring->need_refill) {
- if (mlx4_en_fill_rx_buf(dev, ring)) {
- ring->need_refill = 0;
- mlx4_en_update_rx_prod_db(ring);
- } else
- need_refill = 1;
- }
- }
- if (need_refill)
- queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
- mutex_unlock(&mdev->state_lock);
-}
-
-
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
@@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring_ind--;
goto err_allocator;
}
-
- /* Fill Rx buffers */
- ring->full = 0;
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
@@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
return skb;
}
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- int from, int to, int num)
-{
- struct skb_frag_struct *skb_frags_from;
- struct skb_frag_struct *skb_frags_to;
- struct mlx4_en_rx_desc *rx_desc_from;
- struct mlx4_en_rx_desc *rx_desc_to;
- int from_index, to_index;
- int nr, i;
-
- for (i = 0; i < num; i++) {
- from_index = (from + i) & ring->size_mask;
- to_index = (to + i) & ring->size_mask;
- skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
- skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
- rx_desc_from = ring->buf + (from_index << ring->log_stride);
- rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
- for (nr = 0; nr < priv->num_frags; nr++) {
- skb_frags_to[nr].page = skb_frags_from[nr].page;
- skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
- rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
- }
- }
-}
-
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
@@ -821,11 +730,6 @@ out:
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
ring->prod += polled; /* Polled descriptors were realocated in place */
- if (unlikely(!ring->full)) {
- mlx4_en_copy_desc(priv, ring, ring->cons - polled,
- ring->prod - polled, polled);
- mlx4_en_fill_rx_buf(dev, ring);
- }
mlx4_en_update_rx_prod_db(ring);
return polled;
}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5dc7466ad035..08c43f2ae72b 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -515,16 +515,9 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
else {
if (netif_msg_tx_err(priv))
en_warn(priv, "Non-linear headers\n");
- dev_kfree_skb_any(skb);
return 0;
}
}
- if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "LSO header size too big\n");
- dev_kfree_skb_any(skb);
- return 0;
- }
} else {
*lso_header_size = 0;
if (!is_inline(skb, NULL))
@@ -616,13 +609,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int lso_header_size;
void *fragptr;
- if (unlikely(!skb->len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
real_size = get_real_size(skb, dev, &lso_header_size);
if (unlikely(!real_size))
- return NETDEV_TX_OK;
+ goto tx_drop;
/* Allign descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -630,8 +619,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ goto tx_drop;
}
tx_ind = skb->queue_mapping;
@@ -653,14 +641,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- /* Now that we know what Tx ring to use */
- if (unlikely(!priv->port_up)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "xmit: port down!\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
(u32) (ring->prod - ring->cons - 1));
@@ -785,5 +765,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
mlx4_en_xmit_poll(priv, tx_ind);
return 0;
+
+tx_drop:
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index d43a9e4c2aea..c7c5e86804ff 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -99,7 +99,6 @@
#define RSS_FACTOR 2
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
-#define MAX_LSO_HDR_SIZE 92
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
#define STAMP_SHIFT 31
@@ -296,8 +295,6 @@ struct mlx4_en_rx_ring {
u32 prod;
u32 cons;
u32 buf_size;
- int need_refill;
- int full;
void *buf;
void *rx_info;
unsigned long bytes;
@@ -495,7 +492,6 @@ struct mlx4_en_priv {
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct work_struct mcast_task;
struct work_struct mac_task;
- struct delayed_work refill_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
@@ -565,7 +561,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_refill(struct work_struct *work);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 5887e4764d22..f96948be0a44 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -399,11 +399,14 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
if (!mtts)
return -ENOMEM;
+ dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
+
for (i = 0; i < npages; ++i)
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
- npages * sizeof (u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
return 0;
}
@@ -547,11 +550,14 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
/* Make sure MPT status is visible before writing MTT entries */
wmb();
+ dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
+
for (i = 0; i < npages; ++i)
fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
fmr->mpt->key = cpu_to_be32(key);
fmr->mpt->lkey = cpu_to_be32(key);
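
The mr.c changes above reorder the streaming-DMA sync calls around the CPU writes: for a DMA_TO_DEVICE mapping the CPU must take ownership with dma_sync_single_for_cpu() before filling the MTT/FMR entries, and hand the buffer back with dma_sync_single_for_device() afterwards, so the device never observes stale or partially flushed data. The generic shape of that ordering, as a kernel-context sketch that does not compile on its own (dev, handle, cpu_ptr, len and fill_buffer() are placeholders):

	/* Mapping created earlier with dma_map_single(dev, cpu_ptr, len, DMA_TO_DEVICE). */
	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);	/* 1. CPU takes ownership */
	fill_buffer(cpu_ptr, len);					/* 2. CPU writes the data */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);	/* 3. device may read now */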
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 745ae8b4a2e8..0f32db3e92ad 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1750,12 +1750,12 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
uc_addr_set(mp, dev->dev_addr);
- port_config = rdlp(mp, PORT_CONFIG);
+ port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
nibbles = uc_addr_filter_mask(dev);
if (!nibbles) {
port_config |= UNICAST_PROMISCUOUS_MODE;
- wrlp(mp, PORT_CONFIG, port_config);
- return;
+ nibbles = 0xffff;
}
for (i = 0; i < 16; i += 4) {
@@ -1776,7 +1776,6 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
wrl(mp, off, v);
}
- port_config &= ~UNICAST_PROMISCUOUS_MODE;
wrlp(mp, PORT_CONFIG, port_config);
}
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index c9bfe4eea189..78c088331f57 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -130,8 +130,8 @@ static int full_duplex[MAX_UNITS];
static const char version[] __devinitconst =
KERN_INFO DRV_NAME " dp8381x driver, version "
DRV_VERSION ", " DRV_RELDATE "\n"
- KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
- KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
+ " originally by Donald Becker <becker@scyld.com>\n"
+ " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 5c3e242428f1..992dbfffdb05 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -321,7 +321,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ printk(KERN_INFO "%s%s", version1, version2);
printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 970cedeb5f37..e1cdba752e09 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -60,7 +60,18 @@
#define _NETXEN_NIC_LINUX_SUBVERSION 30
#define NETXEN_NIC_LINUX_VERSIONID "4.0.30"
-#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define NETXEN_DECODE_VERSION(v) \
+ NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
#define NETXEN_NUM_FLASH_SECTORS (64)
#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
@@ -614,6 +625,7 @@ struct netxen_new_user_info {
#define NX_P2_MN_ROMIMAGE 0
#define NX_P3_CT_ROMIMAGE 1
#define NX_P3_MN_ROMIMAGE 2
+#define NX_FLASH_ROMIMAGE 3
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* for backward compatibility */
@@ -1243,7 +1255,7 @@ struct netxen_adapter {
u32 resv3;
u8 has_link_events;
- u8 resv1;
+ u8 fw_type;
u16 tx_context_id;
u16 mtu;
u16 is_up;
@@ -1387,6 +1399,7 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter);
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
int netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_need_fw_reset(struct netxen_adapter *adapter);
void netxen_request_firmware(struct netxen_adapter *adapter);
void netxen_release_firmware(struct netxen_adapter *adapter);
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
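
The netxen_nic.h hunk widens NETXEN_VERSION_CODE to an 8/8/16-bit major/minor/build layout and adds NETXEN_DECODE_VERSION() plus the _major/_minor/_build accessors to translate the firmware image's own encoding (7:0 major, 15:8 minor, 31:16 build) into that comparable code, which the later netxen_nic_init.c hunks rely on when comparing file, flash and running firmware versions. A small stand-alone round-trip check (the five macros are copied from the hunk; the rest is test scaffolding):

#include <stdint.h>
#include <stdio.h>

/* Copied from the hunk above. */
#define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
#define _major(v)	(((v) >> 24) & 0xff)
#define _minor(v)	(((v) >> 16) & 0xff)
#define _build(v)	((v) & 0xffff)
#define NETXEN_DECODE_VERSION(v) \
	NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))

int main(void)
{
	/* 4.0.216 as stored in the image: 7:0 major, 15:8 minor, 31:16 build. */
	uint32_t raw = 4u | (0u << 8) | (216u << 16);
	uint32_t ver = NETXEN_DECODE_VERSION(raw);

	printf("decoded %u.%u.%u -> code %#x\n",
	       (unsigned)_major(ver), (unsigned)_minor(ver),
	       (unsigned)_build(ver), (unsigned)ver);
	printf("equal to NETXEN_VERSION_CODE(4, 0, 216): %s\n",
	       ver == NETXEN_VERSION_CODE(4, 0, 216) ? "yes" : "no");
	return 0;
}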
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 3cc047844af3..824103675648 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -853,6 +853,7 @@ enum {
#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
+#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0))
#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index bdb143d2b5c7..b899bd51fcd8 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -684,11 +684,84 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
}
int
+netxen_need_fw_reset(struct netxen_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+ u8 fw_type;
+
+ /* NX2031 firmware doesn't support heartbeat */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ /* last attempt had failed */
+ if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ NXWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have a newer or different firmware file */
+ if (adapter->fw) {
+
+ const struct firmware *fw = adapter->fw;
+
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+ version = NETXEN_DECODE_VERSION(val);
+
+ major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+ if (version > NETXEN_VERSION_CODE(major, minor, build))
+ return 1;
+
+ if (version == NETXEN_VERSION_CODE(major, minor, build)) {
+
+ val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
+ fw_type = (val & 0x4) ?
+ NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
+
+ if (adapter->fw_type != fw_type)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static char *fw_name[] = {
+ "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash",
+};
+
+int
netxen_load_firmware(struct netxen_adapter *adapter)
{
u64 *ptr64;
u32 i, flashaddr, size;
const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
@@ -756,7 +829,7 @@ static int
netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
{
__le32 val;
- u32 major, minor, build, ver, min_ver, bios;
+ u32 ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
@@ -768,21 +841,18 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
return -EINVAL;
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
- major = (__force u32)val & 0xff;
- minor = ((__force u32)val >> 8) & 0xff;
- build = (__force u32)val >> 16;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
min_ver = NETXEN_VERSION_CODE(4, 0, 216);
else
min_ver = NETXEN_VERSION_CODE(3, 4, 216);
- ver = NETXEN_VERSION_CODE(major, minor, build);
+ ver = NETXEN_DECODE_VERSION(val);
- if ((major > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
+ if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
dev_err(&pdev->dev,
"%s: firmware version %d.%d.%d unsupported\n",
- fwname, major, minor, build);
+ fwname, _major(ver), _minor(ver), _build(ver));
return -EINVAL;
}
@@ -798,22 +868,21 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
if (netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&val))
return -EIO;
- major = (__force u32)val & 0xff;
- minor = ((__force u32)val >> 8) & 0xff;
- build = (__force u32)val >> 16;
- if (NETXEN_VERSION_CODE(major, minor, build) > ver)
+ val = NETXEN_DECODE_VERSION(val);
+ if (val > ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fwname);
return -EINVAL;
+ }
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
return 0;
}
-static char *fw_name[] = { "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin" };
-
void netxen_request_firmware(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
- int fw_type;
+ u8 fw_type;
struct pci_dev *pdev = adapter->pdev;
int rc = 0;
@@ -830,6 +899,8 @@ request_mn:
netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT) {
@@ -838,6 +909,10 @@ request_mn:
}
}
+ fw_type = NX_FLASH_ROMIMAGE;
+ adapter->fw = NULL;
+ goto done;
+
request_fw:
rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
if (rc != 0) {
@@ -846,6 +921,7 @@ request_fw:
goto request_mn;
}
+ fw_type = NX_FLASH_ROMIMAGE;
adapter->fw = NULL;
goto done;
}
@@ -859,16 +935,13 @@ request_fw:
goto request_mn;
}
+ fw_type = NX_FLASH_ROMIMAGE;
adapter->fw = NULL;
goto done;
}
done:
- if (adapter->fw)
- dev_info(&pdev->dev, "loading firmware from file %s\n",
- fw_name[fw_type]);
- else
- dev_info(&pdev->dev, "loading firmware from flash\n");
+ adapter->fw_type = fw_type;
}
@@ -944,28 +1017,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
u32 val = 0;
int retries = 60;
- if (!pegtune_val) {
- do {
- val = NXRD32(adapter, CRB_CMDPEG_STATE);
+ if (pegtune_val)
+ return 0;
- if (val == PHAN_INITIALIZE_COMPLETE ||
- val == PHAN_INITIALIZE_ACK)
- return 0;
+ do {
+ val = NXRD32(adapter, CRB_CMDPEG_STATE);
- msleep(500);
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
- } while (--retries);
+ msleep(500);
- if (!retries) {
- pegtune_val = NXRD32(adapter,
- NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
- printk(KERN_WARNING "netxen_phantom_init: init failed, "
- "pegtune_val=%x\n", pegtune_val);
- return -1;
- }
- }
+ } while (--retries);
- return 0;
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
}
static int
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 71daa3d5f114..27539ddf94c4 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
first_driver = (adapter->ahw.pci_func == 0);
if (!first_driver)
- return 0;
+ goto wait_init;
first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
@@ -718,6 +718,10 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
if (request_fw)
netxen_request_firmware(adapter);
+ err = netxen_need_fw_reset(adapter);
+ if (err <= 0)
+ return err;
+
if (first_boot != 0x55555555) {
NXWR32(adapter, CRB_CMDPEG_STATE, 0);
netxen_pinit_from_rom(adapter, 0);
@@ -752,6 +756,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
| (_NETXEN_NIC_LINUX_SUBVERSION);
NXWR32(adapter, CRB_DRIVER_VERSION, val);
+wait_init:
/* Handshake with the card before we register the devices. */
err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
if (err) {
@@ -1178,6 +1183,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
+#ifdef CONFIG_PM
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1242,6 +1248,7 @@ netxen_nic_resume(struct pci_dev *pdev)
return 0;
}
+#endif
static int netxen_nic_open(struct net_device *netdev)
{
@@ -1771,8 +1778,10 @@ static struct pci_driver netxen_driver = {
.id_table = netxen_pci_tbl,
.probe = netxen_nic_probe,
.remove = __devexit_p(netxen_nic_remove),
+#ifdef CONFIG_PM
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume
+#endif
};
/* Driver Registration on NetXen card */
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 8c1f6988f398..89f7b2ad5231 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -105,7 +105,7 @@ IVc. Errata
static char version[] __devinitdata =
KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
-KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
+" Support available from http://foo.com/bar/baz.html\n";
/* define to 1 to enable PIO instead of MMIO */
#undef USE_IO_OPS
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index f51944b28cfa..06618af1a468 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -298,14 +298,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
strcpy(info->node.dev_name, dev->name);
- printk(KERN_INFO "%s: port %#3lx, irq %d,",
- dev->name, dev->base_addr, dev->irq);
- printk (" mmio %#5lx,", (u_long)ti->mmio);
- printk (" sram %#5lx,", (u_long)ti->sram_base << 12);
- printk ("\n" KERN_INFO " hwaddr=");
- for (i = 0; i < TR_ALEN; i++)
- printk("%02X", dev->dev_addr[i]);
- printk("\n");
+ printk(KERN_INFO
+ "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
+ dev->name, dev->base_addr, dev->irq,
+ (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
+ dev->dev_addr);
return 0;
cs_failed:
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 02ef63ed1f99..36de91baf238 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1425,15 +1425,12 @@ static void BuildLAF(int *ladrf, int *adr)
ladrf[byte] |= (1 << (hashcode & 7));
#ifdef PCMCIA_DEBUG
- if (pc_debug > 2) {
- printk(KERN_DEBUG " adr =");
- for (i = 0; i < 6; i++)
- printk(" %02X", adr[i]);
- printk("\n" KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63]"
- " =", hashcode);
- for (i = 0; i < 8; i++)
- printk(" %02X", ladrf[i]);
- printk("\n");
+ if (pc_debug > 2) {
+ printk(KERN_DEBUG " adr =%pM\n", adr);
+ printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
+ for (i = 0; i < 8; i++)
+ printk(KERN_CONT " %02X", ladrf[i]);
+ printk(KERN_CONT "\n");
}
#endif
} /* BuildLAF */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 652a36888361..9ef1c1bfa83d 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50),
PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110),
PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826),
PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df),
PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0),
PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd),
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 1c35e1d637a0..28368157dac4 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -485,7 +485,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
&new_ring_dma_addr);
if (new_tx_ring == NULL) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR
+ printk(KERN_ERR
"%s: Consistent memory allocation failed.\n",
dev->name);
return;
@@ -496,7 +496,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
GFP_ATOMIC);
if (!new_dma_addr_list) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR
+ printk(KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_tx_ring;
}
@@ -505,7 +505,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
GFP_ATOMIC);
if (!new_skb_list) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR
+ printk(KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_lists;
}
@@ -563,7 +563,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR
+ printk(KERN_ERR
"%s: Consistent memory allocation failed.\n",
dev->name);
return;
@@ -574,7 +574,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
GFP_ATOMIC);
if (!new_dma_addr_list) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR
+ printk(KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_rx_ring;
}
@@ -583,7 +583,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
GFP_ATOMIC);
if (!new_skb_list) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR
+ printk(KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_lists;
}
@@ -1766,38 +1766,38 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* Version 0x2623 and 0x2624 */
if (((chip_version + 1) & 0xfffe) == 0x2624) {
i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
- printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i);
+ printk(KERN_INFO " tx_start_pt(0x%04x):", i);
switch (i >> 10) {
case 0:
- printk(" 20 bytes,");
+ printk(KERN_CONT " 20 bytes,");
break;
case 1:
- printk(" 64 bytes,");
+ printk(KERN_CONT " 64 bytes,");
break;
case 2:
- printk(" 128 bytes,");
+ printk(KERN_CONT " 128 bytes,");
break;
case 3:
- printk("~220 bytes,");
+ printk(KERN_CONT "~220 bytes,");
break;
}
i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
- printk(" BCR18(%x):", i & 0xffff);
+ printk(KERN_CONT " BCR18(%x):", i & 0xffff);
if (i & (1 << 5))
- printk("BurstWrEn ");
+ printk(KERN_CONT "BurstWrEn ");
if (i & (1 << 6))
- printk("BurstRdEn ");
+ printk(KERN_CONT "BurstRdEn ");
if (i & (1 << 7))
- printk("DWordIO ");
+ printk(KERN_CONT "DWordIO ");
if (i & (1 << 11))
- printk("NoUFlow ");
+ printk(KERN_CONT "NoUFlow ");
i = a->read_bcr(ioaddr, 25);
- printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
+ printk(KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 26);
- printk(" SRAM_BND=0x%04x,", i << 8);
+ printk(KERN_CONT " SRAM_BND=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 27);
if (i & (1 << 14))
- printk("LowLatRx");
+ printk(KERN_CONT "LowLatRx");
}
}
@@ -1996,7 +1996,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
&lp->tx_ring_dma_addr);
if (lp->tx_ring == NULL) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR PFX
+ printk(KERN_ERR PFX
"%s: Consistent memory allocation failed.\n",
name);
return -ENOMEM;
@@ -2008,7 +2008,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
&lp->rx_ring_dma_addr);
if (lp->rx_ring == NULL) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR PFX
+ printk(KERN_ERR PFX
"%s: Consistent memory allocation failed.\n",
name);
return -ENOMEM;
@@ -2018,7 +2018,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
GFP_ATOMIC);
if (!lp->tx_dma_addr) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR PFX
+ printk(KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
@@ -2027,7 +2027,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
GFP_ATOMIC);
if (!lp->rx_dma_addr) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR PFX
+ printk(KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
@@ -2036,7 +2036,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
GFP_ATOMIC);
if (!lp->tx_skbuff) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR PFX
+ printk(KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
@@ -2045,7 +2045,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
GFP_ATOMIC);
if (!lp->rx_skbuff) {
if (netif_msg_drv(lp))
- printk("\n" KERN_ERR PFX
+ printk(KERN_ERR PFX
"%s: Memory allocation failed.\n", name);
return -ENOMEM;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 61755cbd978e..eda94fcd4065 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -928,13 +928,32 @@ static void phy_state_machine(struct work_struct *work)
* Otherwise, it's 0, and we're
* still waiting for AN */
if (err > 0) {
- phydev->state = PHY_RUNNING;
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else
+ phydev->state = PHY_NOLINK;
+ phydev->adjust_link(phydev->attached_dev);
} else {
phydev->state = PHY_AN;
phydev->link_timeout = PHY_AN_TIMEOUT;
}
- } else
- phydev->state = PHY_RUNNING;
+ } else {
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else
+ phydev->state = PHY_NOLINK;
+ phydev->adjust_link(phydev->attached_dev);
+ }
break;
}
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399d6dd9..17c116bb332c 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -356,7 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
ap_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db13586..aa3d39f38e22 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -397,7 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
sp_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8a823ecc99a9..3e4b67aaa6ea 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
(void __iomem *)port_regs;
u32 delay = 10;
int status = 0;
+ unsigned long hw_flags = 0;
if(ql_mii_setup(qdev))
return -1;
@@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
(ISP_SERIAL_PORT_IF_WE |
(ISP_SERIAL_PORT_IF_WE << 16)));
-
+ /* Give the PHY time to come out of reset. */
+ mdelay(100);
qdev->port_link_state = LS_DOWN;
netif_carrier_off(qdev->ndev);
@@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
value = ql_read_page0_reg(qdev, &port_regs->portStatus);
if (value & PORT_STATUS_IC)
break;
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
msleep(500);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
} while (--delay);
if (delay == 0) {
@@ -3837,7 +3841,9 @@ static void ql_reset_work(struct work_struct *work)
16) | ISP_CONTROL_RI));
}
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
} while (--max_wait_time);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 156e02e8905d..6ed5317ab1c0 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1607,6 +1607,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_mb_about_fw(struct ql_adapter *qdev);
+void ql_link_on(struct ql_adapter *qdev);
+void ql_link_off(struct ql_adapter *qdev);
#if 1
#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 37c99fe79770..eb6a9ee640ed 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -59,7 +59,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb->pkt_delay =
cpu_to_le16(qdev->tx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
QPRINTK(qdev, IFUP, ERR,
@@ -82,7 +82,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb->pkt_delay =
cpu_to_le16(qdev->rx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
QPRINTK(qdev, IFUP, ERR,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 90d1f76c0e8b..5768af17f168 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -214,6 +214,10 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
return -ENOMEM;
}
+ status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+ if (status)
+ return status;
+
status = ql_wait_cfg(qdev, bit);
if (status) {
QPRINTK(qdev, IFUP, ERR,
@@ -221,12 +225,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
goto exit;
}
- status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
- if (status)
- goto exit;
ql_write32(qdev, ICB_L, (u32) map);
ql_write32(qdev, ICB_H, (u32) (map >> 32));
- ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
mask = CFG_Q_MASK | (bit << 16);
value = bit | (q_id << CFG_Q_SHIFT);
@@ -237,6 +237,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
*/
status = ql_wait_cfg(qdev, bit);
exit:
+ ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
pci_unmap_single(qdev->pdev, map, size, direction);
return status;
}
@@ -412,6 +413,57 @@ exit:
return status;
}
+/* Set or clear MAC address in hardware. We sometimes
+ * have to clear it to prevent wrong frame routing
+ * especially in a bonding environment.
+ */
+static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+{
+ int status;
+ char zero_mac_addr[ETH_ALEN];
+ char *addr;
+
+ if (set) {
+ addr = &qdev->ndev->dev_addr[0];
+ QPRINTK(qdev, IFUP, DEBUG,
+ "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+ addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5]);
+ } else {
+ memset(zero_mac_addr, 0, ETH_ALEN);
+ addr = &zero_mac_addr[0];
+ QPRINTK(qdev, IFUP, DEBUG,
+ "Clearing MAC address on %s\n",
+ qdev->ndev->name);
+ }
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+ status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
+ "address.\n");
+ return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+ QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
+ qdev->ndev->name);
+ netif_carrier_on(qdev->ndev);
+ ql_set_mac_addr(qdev, 1);
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+ QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
+ qdev->ndev->name);
+ netif_carrier_off(qdev->ndev);
+ ql_set_mac_addr(qdev, 0);
+}
+
/* Get a specific frame routing value from the CAM.
* Used for debug and reg dump.
*/
@@ -1628,7 +1680,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
tx_ring_desc = &tx_ring->q[mac_rsp->tid];
ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
- qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
+ qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
qdev->stats.tx_packets++;
dev_kfree_skb(tx_ring_desc->skb);
tx_ring_desc->skb = NULL;
@@ -1660,13 +1712,13 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
void ql_queue_asic_error(struct ql_adapter *qdev)
{
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
ql_disable_interrupts(qdev);
/* Clear adapter up bit to signal the recovery
* process that it shouldn't kill the reset worker
@@ -2104,7 +2156,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
}
tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
mac_iocb_ptr = tx_ring_desc->queue_entry;
- memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
+ memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -2743,7 +2795,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
ql_init_tx_ring(qdev, tx_ring);
- err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ,
+ err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
@@ -3008,7 +3060,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
int i;
u8 *hash_id = (u8 *) ricb->hash_cq_id;
- memset((void *)ricb, 0, sizeof(ricb));
+ memset((void *)ricb, 0, sizeof(*ricb));
ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
ricb->flags =
@@ -3030,7 +3082,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
- status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
+ status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
return status;
@@ -3039,25 +3091,40 @@ static int ql_start_rss(struct ql_adapter *qdev)
return status;
}
-/* Initialize the frame-to-queue routing. */
-static int ql_route_initialize(struct ql_adapter *qdev)
+static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
- int status = 0;
- int i;
+ int i, status = 0;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
-
/* Clear all the entries in the routing table. */
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM packets.\n");
- goto exit;
+ "Failed to init routing register for CAM "
+ "packets.\n");
+ break;
}
}
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+ int status = 0;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status)
+ goto exit;
status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
if (status) {
@@ -3096,14 +3163,15 @@ exit:
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
- int status;
+ int status, set;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ /* Check whether the link is up and use that to
+ * determine if we are setting or clearing
+ * the MAC address in the CAM.
+ */
+ set = ql_read32(qdev, STS);
+ set &= qdev->port_link_up;
+ status = ql_set_mac_addr(qdev, set);
if (status) {
QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
return status;
@@ -3210,9 +3278,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
{
u32 value;
int status = 0;
- unsigned long end_jiffies = jiffies +
- max((unsigned long)1, usecs_to_jiffies(30));
+ unsigned long end_jiffies;
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+ return status;
+ }
+
+ end_jiffies = jiffies +
+ max((unsigned long)1, usecs_to_jiffies(30));
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
do {
@@ -3252,7 +3328,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
int i, status = 0;
struct rx_ring *rx_ring;
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
/* Don't kill the reset worker thread if we
* are in the process of recovery.
@@ -3319,8 +3395,12 @@ static int ql_adapter_up(struct ql_adapter *qdev)
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
ql_alloc_rx_buffers(qdev);
- if ((ql_read32(qdev, STS) & qdev->port_init))
- netif_carrier_on(qdev->ndev);
+ /* If the port is initialized and the
+ * link is up then turn on the carrier.
+ */
+ if ((ql_read32(qdev, STS) & qdev->port_init) &&
+ (ql_read32(qdev, STS) & qdev->port_link_up))
+ ql_link_on(qdev);
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
@@ -3346,11 +3426,6 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
return -ENOMEM;
}
status = ql_request_irq(qdev);
- if (status)
- goto err_irq;
- return status;
-err_irq:
- ql_free_mem_resources(qdev);
return status;
}
@@ -3414,7 +3489,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
for (i = 0; i < qdev->tx_ring_count; i++) {
tx_ring = &qdev->tx_ring[i];
- memset((void *)tx_ring, 0, sizeof(tx_ring));
+ memset((void *)tx_ring, 0, sizeof(*tx_ring));
tx_ring->qdev = qdev;
tx_ring->wq_id = i;
tx_ring->wq_len = qdev->tx_ring_size;
@@ -3430,7 +3505,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
- memset((void *)rx_ring, 0, sizeof(rx_ring));
+ memset((void *)rx_ring, 0, sizeof(*rx_ring));
rx_ring->qdev = qdev;
rx_ring->cq_id = i;
rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
@@ -3789,7 +3864,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
int pos, err = 0;
u16 val16;
- memset((void *)qdev, 0, sizeof(qdev));
+ memset((void *)qdev, 0, sizeof(*qdev));
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "PCI device enable failed.\n");
@@ -3976,7 +4051,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
pci_disable_device(pdev);
return err;
}
- netif_carrier_off(ndev);
+ ql_link_off(qdev);
ql_display_dev_info(ndev);
cards_found++;
return 0;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 71afbf8b9c50..6685bd97da91 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -238,7 +238,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
&qdev->mpi_port_cfg_work, 0);
}
- netif_carrier_on(qdev->ndev);
+ ql_link_on(qdev);
}
static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -251,7 +251,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (status)
QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
}
static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -849,7 +849,7 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_PORT_RESET:
case MB_CMD_SET_PORT_CFG:
case MB_CMD_STOP_FW:
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
/* Signal the resulting link up AEN
* that the frame routing and mac addr
* needs to be set.
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index ed63d23a6452..961b5397a531 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.23"
-#define DRV_RELDATE "05May2009"
+#define DRV_VERSION "0.24"
+#define DRV_RELDATE "08Jul2009"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -704,8 +704,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
/* Read MISR status and clear */
status = ioread16(ioaddr + MISR);
- if (status == 0x0000 || status == 0xffff)
+ if (status == 0x0000 || status == 0xffff) {
+ /* Restore RDC MAC interrupt */
+ iowrite16(misr, ioaddr + MIER);
return IRQ_NONE;
+ }
/* RX interrupt request */
if (status & RX_INTS) {
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4e22462684c9..4b53b58d75fc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -51,9 +51,6 @@
#define TX_BUFFS_AVAIL(tp) \
(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static const int max_interrupt_work = 20;
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
new file mode 100644
index 000000000000..5345e47b35ac
--- /dev/null
+++ b/drivers/net/s6gmac.c
@@ -0,0 +1,1073 @@
+/*
+ * Ethernet driver for S6105 on chip network device
+ * (c)2008 emlix GmbH http://www.emlix.com
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/stddef.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <variant/hardware.h>
+#include <variant/dmac.h>
+
+#define DRV_NAME "s6gmac"
+#define DRV_PRMT DRV_NAME ": "
+
+
+/* register declarations */
+
+#define S6_GMAC_MACCONF1 0x000
+#define S6_GMAC_MACCONF1_TXENA 0
+#define S6_GMAC_MACCONF1_SYNCTX 1
+#define S6_GMAC_MACCONF1_RXENA 2
+#define S6_GMAC_MACCONF1_SYNCRX 3
+#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
+#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
+#define S6_GMAC_MACCONF1_LOOPBACK 8
+#define S6_GMAC_MACCONF1_RESTXFUNC 16
+#define S6_GMAC_MACCONF1_RESRXFUNC 17
+#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
+#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
+#define S6_GMAC_MACCONF1_SIMULRES 30
+#define S6_GMAC_MACCONF1_SOFTRES 31
+#define S6_GMAC_MACCONF2 0x004
+#define S6_GMAC_MACCONF2_FULL 0
+#define S6_GMAC_MACCONF2_CRCENA 1
+#define S6_GMAC_MACCONF2_PADCRCENA 2
+#define S6_GMAC_MACCONF2_LENGTHFCHK 4
+#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
+#define S6_GMAC_MACCONF2_IFMODE 8
+#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
+#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
+#define S6_GMAC_MACCONF2_IFMODE_MASK 3
+#define S6_GMAC_MACCONF2_PREAMBLELEN 12
+#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
+#define S6_GMAC_MACIPGIFG 0x008
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
+#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
+#define S6_GMAC_MACHALFDUPLEX 0x00C
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
+#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
+#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
+#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
+#define S6_GMAC_MACMAXFRAMELEN 0x010
+#define S6_GMAC_MACMIICONF 0x020
+#define S6_GMAC_MACMIICONF_CSEL 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
+#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
+#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
+#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
+#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
+#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
+#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
+#define S6_GMAC_MACMIICONF_CSEL_MASK 7
+#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
+#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
+#define S6_GMAC_MACMIICMD 0x024
+#define S6_GMAC_MACMIICMD_READ 0
+#define S6_GMAC_MACMIICMD_SCAN 1
+#define S6_GMAC_MACMIIADDR 0x028
+#define S6_GMAC_MACMIIADDR_REG 0
+#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
+#define S6_GMAC_MACMIIADDR_PHY 8
+#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
+#define S6_GMAC_MACMIICTRL 0x02C
+#define S6_GMAC_MACMIISTAT 0x030
+#define S6_GMAC_MACMIIINDI 0x034
+#define S6_GMAC_MACMIIINDI_BUSY 0
+#define S6_GMAC_MACMIIINDI_SCAN 1
+#define S6_GMAC_MACMIIINDI_INVAL 2
+#define S6_GMAC_MACINTERFSTAT 0x03C
+#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
+#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
+#define S6_GMAC_MACSTATADDR1 0x040
+#define S6_GMAC_MACSTATADDR2 0x044
+
+#define S6_GMAC_FIFOCONF0 0x048
+#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
+#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
+#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
+#define S6_GMAC_FIFOCONF0_HSTRSTST 3
+#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
+#define S6_GMAC_FIFOCONF0_WTMENREQ 8
+#define S6_GMAC_FIFOCONF0_SRFENREQ 9
+#define S6_GMAC_FIFOCONF0_FRFENREQ 10
+#define S6_GMAC_FIFOCONF0_STFENREQ 11
+#define S6_GMAC_FIFOCONF0_FTFENREQ 12
+#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
+#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
+#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
+#define S6_GMAC_FIFOCONF0_STFENRPLY 19
+#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
+#define S6_GMAC_FIFOCONF1 0x04C
+#define S6_GMAC_FIFOCONF2 0x050
+#define S6_GMAC_FIFOCONF2_CFGLWM 0
+#define S6_GMAC_FIFOCONF2_CFGHWM 16
+#define S6_GMAC_FIFOCONF3 0x054
+#define S6_GMAC_FIFOCONF3_CFGFTTH 0
+#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
+#define S6_GMAC_FIFOCONF4 0x058
+#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
+#define S6_GMAC_FIFOCONF_RSV_RUNT 1
+#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
+#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
+#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
+#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
+#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
+#define S6_GMAC_FIFOCONF_RSV_OK 7
+#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
+#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
+#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
+#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
+#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
+#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
+#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
+#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
+#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
+#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
+#define S6_GMAC_FIFOCONF5 0x05C
+#define S6_GMAC_FIFOCONF5_DROPLT64 18
+#define S6_GMAC_FIFOCONF5_CFGBYTM 19
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
+
+#define S6_GMAC_STAT_REGS 0x080
+#define S6_GMAC_STAT_SIZE_MIN 12
+#define S6_GMAC_STATTR64 0x080
+#define S6_GMAC_STATTR64_SIZE 18
+#define S6_GMAC_STATTR127 0x084
+#define S6_GMAC_STATTR127_SIZE 18
+#define S6_GMAC_STATTR255 0x088
+#define S6_GMAC_STATTR255_SIZE 18
+#define S6_GMAC_STATTR511 0x08C
+#define S6_GMAC_STATTR511_SIZE 18
+#define S6_GMAC_STATTR1K 0x090
+#define S6_GMAC_STATTR1K_SIZE 18
+#define S6_GMAC_STATTRMAX 0x094
+#define S6_GMAC_STATTRMAX_SIZE 18
+#define S6_GMAC_STATTRMGV 0x098
+#define S6_GMAC_STATTRMGV_SIZE 18
+#define S6_GMAC_STATRBYT 0x09C
+#define S6_GMAC_STATRBYT_SIZE 24
+#define S6_GMAC_STATRPKT 0x0A0
+#define S6_GMAC_STATRPKT_SIZE 18
+#define S6_GMAC_STATRFCS 0x0A4
+#define S6_GMAC_STATRFCS_SIZE 12
+#define S6_GMAC_STATRMCA 0x0A8
+#define S6_GMAC_STATRMCA_SIZE 18
+#define S6_GMAC_STATRBCA 0x0AC
+#define S6_GMAC_STATRBCA_SIZE 22
+#define S6_GMAC_STATRXCF 0x0B0
+#define S6_GMAC_STATRXCF_SIZE 18
+#define S6_GMAC_STATRXPF 0x0B4
+#define S6_GMAC_STATRXPF_SIZE 12
+#define S6_GMAC_STATRXUO 0x0B8
+#define S6_GMAC_STATRXUO_SIZE 12
+#define S6_GMAC_STATRALN 0x0BC
+#define S6_GMAC_STATRALN_SIZE 12
+#define S6_GMAC_STATRFLR 0x0C0
+#define S6_GMAC_STATRFLR_SIZE 16
+#define S6_GMAC_STATRCDE 0x0C4
+#define S6_GMAC_STATRCDE_SIZE 12
+#define S6_GMAC_STATRCSE 0x0C8
+#define S6_GMAC_STATRCSE_SIZE 12
+#define S6_GMAC_STATRUND 0x0CC
+#define S6_GMAC_STATRUND_SIZE 12
+#define S6_GMAC_STATROVR 0x0D0
+#define S6_GMAC_STATROVR_SIZE 12
+#define S6_GMAC_STATRFRG 0x0D4
+#define S6_GMAC_STATRFRG_SIZE 12
+#define S6_GMAC_STATRJBR 0x0D8
+#define S6_GMAC_STATRJBR_SIZE 12
+#define S6_GMAC_STATRDRP 0x0DC
+#define S6_GMAC_STATRDRP_SIZE 12
+#define S6_GMAC_STATTBYT 0x0E0
+#define S6_GMAC_STATTBYT_SIZE 24
+#define S6_GMAC_STATTPKT 0x0E4
+#define S6_GMAC_STATTPKT_SIZE 18
+#define S6_GMAC_STATTMCA 0x0E8
+#define S6_GMAC_STATTMCA_SIZE 18
+#define S6_GMAC_STATTBCA 0x0EC
+#define S6_GMAC_STATTBCA_SIZE 18
+#define S6_GMAC_STATTXPF 0x0F0
+#define S6_GMAC_STATTXPF_SIZE 12
+#define S6_GMAC_STATTDFR 0x0F4
+#define S6_GMAC_STATTDFR_SIZE 12
+#define S6_GMAC_STATTEDF 0x0F8
+#define S6_GMAC_STATTEDF_SIZE 12
+#define S6_GMAC_STATTSCL 0x0FC
+#define S6_GMAC_STATTSCL_SIZE 12
+#define S6_GMAC_STATTMCL 0x100
+#define S6_GMAC_STATTMCL_SIZE 12
+#define S6_GMAC_STATTLCL 0x104
+#define S6_GMAC_STATTLCL_SIZE 12
+#define S6_GMAC_STATTXCL 0x108
+#define S6_GMAC_STATTXCL_SIZE 12
+#define S6_GMAC_STATTNCL 0x10C
+#define S6_GMAC_STATTNCL_SIZE 13
+#define S6_GMAC_STATTPFH 0x110
+#define S6_GMAC_STATTPFH_SIZE 12
+#define S6_GMAC_STATTDRP 0x114
+#define S6_GMAC_STATTDRP_SIZE 12
+#define S6_GMAC_STATTJBR 0x118
+#define S6_GMAC_STATTJBR_SIZE 12
+#define S6_GMAC_STATTFCS 0x11C
+#define S6_GMAC_STATTFCS_SIZE 12
+#define S6_GMAC_STATTXCF 0x120
+#define S6_GMAC_STATTXCF_SIZE 12
+#define S6_GMAC_STATTOVR 0x124
+#define S6_GMAC_STATTOVR_SIZE 12
+#define S6_GMAC_STATTUND 0x128
+#define S6_GMAC_STATTUND_SIZE 12
+#define S6_GMAC_STATTFRG 0x12C
+#define S6_GMAC_STATTFRG_SIZE 12
+#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
+#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
+#define S6_GMAC_STATCARRY1_RDRP 0
+#define S6_GMAC_STATCARRY1_RJBR 1
+#define S6_GMAC_STATCARRY1_RFRG 2
+#define S6_GMAC_STATCARRY1_ROVR 3
+#define S6_GMAC_STATCARRY1_RUND 4
+#define S6_GMAC_STATCARRY1_RCSE 5
+#define S6_GMAC_STATCARRY1_RCDE 6
+#define S6_GMAC_STATCARRY1_RFLR 7
+#define S6_GMAC_STATCARRY1_RALN 8
+#define S6_GMAC_STATCARRY1_RXUO 9
+#define S6_GMAC_STATCARRY1_RXPF 10
+#define S6_GMAC_STATCARRY1_RXCF 11
+#define S6_GMAC_STATCARRY1_RBCA 12
+#define S6_GMAC_STATCARRY1_RMCA 13
+#define S6_GMAC_STATCARRY1_RFCS 14
+#define S6_GMAC_STATCARRY1_RPKT 15
+#define S6_GMAC_STATCARRY1_RBYT 16
+#define S6_GMAC_STATCARRY1_TRMGV 25
+#define S6_GMAC_STATCARRY1_TRMAX 26
+#define S6_GMAC_STATCARRY1_TR1K 27
+#define S6_GMAC_STATCARRY1_TR511 28
+#define S6_GMAC_STATCARRY1_TR255 29
+#define S6_GMAC_STATCARRY1_TR127 30
+#define S6_GMAC_STATCARRY1_TR64 31
+#define S6_GMAC_STATCARRY2_TDRP 0
+#define S6_GMAC_STATCARRY2_TPFH 1
+#define S6_GMAC_STATCARRY2_TNCL 2
+#define S6_GMAC_STATCARRY2_TXCL 3
+#define S6_GMAC_STATCARRY2_TLCL 4
+#define S6_GMAC_STATCARRY2_TMCL 5
+#define S6_GMAC_STATCARRY2_TSCL 6
+#define S6_GMAC_STATCARRY2_TEDF 7
+#define S6_GMAC_STATCARRY2_TDFR 8
+#define S6_GMAC_STATCARRY2_TXPF 9
+#define S6_GMAC_STATCARRY2_TBCA 10
+#define S6_GMAC_STATCARRY2_TMCA 11
+#define S6_GMAC_STATCARRY2_TPKT 12
+#define S6_GMAC_STATCARRY2_TBYT 13
+#define S6_GMAC_STATCARRY2_TFRG 14
+#define S6_GMAC_STATCARRY2_TUND 15
+#define S6_GMAC_STATCARRY2_TOVR 16
+#define S6_GMAC_STATCARRY2_TXCF 17
+#define S6_GMAC_STATCARRY2_TFCS 18
+#define S6_GMAC_STATCARRY2_TJBR 19
+
+#define S6_GMAC_HOST_PBLKCTRL 0x140
+#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
+#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
+#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
+#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
+#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
+#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
+#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
+#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
+#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
+#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
+#define S6_GMAC_HOST_INTMASK 0x144
+#define S6_GMAC_HOST_INTSTAT 0x148
+#define S6_GMAC_HOST_INT_TXBURSTOVER 3
+#define S6_GMAC_HOST_INT_TXPREWOVER 4
+#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
+#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
+#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
+#define S6_GMAC_HOST_RXFIFOHWM 0x14C
+#define S6_GMAC_HOST_CTRLFRAMXP 0x150
+#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
+#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
+
+#define S6_GMAC_BURST_PREWR 0x1B0
+#define S6_GMAC_BURST_PREWR_LEN 0
+#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_PREWR_CFE 20
+#define S6_GMAC_BURST_PREWR_PPE 21
+#define S6_GMAC_BURST_PREWR_FCS 22
+#define S6_GMAC_BURST_PREWR_PAD 23
+#define S6_GMAC_BURST_POSTRD 0x1D0
+#define S6_GMAC_BURST_POSTRD_LEN 0
+#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_POSTRD_DROP 20
+
+
+/* data handling */
+
+#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
+#define S6_NUM_RX_SKB 16
+#define S6_MAX_FRLEN 1536
+
+struct s6gmac {
+ u32 reg;
+ u32 tx_dma;
+ u32 rx_dma;
+ u32 io;
+ u8 tx_chan;
+ u8 rx_chan;
+ spinlock_t lock;
+ u8 tx_skb_i, tx_skb_o;
+ u8 rx_skb_i, rx_skb_o;
+ struct sk_buff *tx_skb[S6_NUM_TX_SKB];
+ struct sk_buff *rx_skb[S6_NUM_RX_SKB];
+ unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
+ unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
+ struct phy_device *phydev;
+ struct {
+ struct mii_bus *bus;
+ int irq[PHY_MAX_ADDR];
+ } mii;
+ struct {
+ unsigned int mbit;
+ u8 giga;
+ u8 isup;
+ u8 full;
+ } link;
+};
+
+static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+{
+ struct sk_buff *skb;
+ while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB)
+ && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan))
+ && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+ pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
+ s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
+ pd->io, (u32)skb->data, S6_MAX_FRLEN);
+ }
+}
+
+static void s6gmac_rx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 pfx;
+ struct sk_buff *skb;
+ while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
+ s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
+ skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
+ pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
+ if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
+ dev_kfree_skb_irq(skb);
+ } else {
+ skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
+ & S6_GMAC_BURST_POSTRD_LEN_MASK);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_rx(skb);
+ }
+ }
+}
+
+static void s6gmac_tx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
+ s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
+ dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ }
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+}
+
+struct s6gmac_statinf {
+ unsigned reg_size : 4; /* 0: unused */
+ unsigned reg_off : 6;
+ unsigned net_index : 6;
+};
+
+#define S6_STATS_B (8 * sizeof(u32))
+#define S6_STATS_C(b, r, f) [b] = { \
+ BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
+ BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
+ >= (1<<4)) + \
+ r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
+ BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
+ >= ((1<<6)-1)) + \
+ (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
+ BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
+ % sizeof(unsigned long)) + \
+ BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
+ / sizeof(unsigned long)) >= (1<<6))) + \
+ BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
+ != sizeof(unsigned long))) + \
+ (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
+
+static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
+ S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
+}, {
+ S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
+} };
+
+static void s6gmac_stats_collect(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf)
+{
+ int b;
+ for (b = 0; b < S6_STATS_B; b++) {
+ if (inf[b].reg_size) {
+ pd->stats[inf[b].net_index] +=
+ readl(pd->reg + S6_GMAC_STAT_REGS
+ + sizeof(u32) * inf[b].reg_off);
+ }
+ }
+}
+
+static void s6gmac_stats_carry(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf, u32 mask)
+{
+ int b;
+ while (mask) {
+ b = fls(mask) - 1;
+ mask &= ~(1 << b);
+ pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
+ }
+}
+
+static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
+{
+ int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
+ ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
+ return r;
+}
+
+static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
+{
+ u32 mask;
+ mask = s6gmac_stats_pending(pd, carry);
+ if (mask) {
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
+ s6gmac_stats_carry(pd, &statinf[carry][0], mask);
+ }
+}
+
+static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct s6gmac *pd = netdev_priv(dev);
+ if (!dev)
+ return IRQ_NONE;
+ spin_lock(&pd->lock);
+ if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
+ s6gmac_rx_interrupt(dev);
+ s6gmac_rx_fillfifo(pd);
+ if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
+ s6gmac_tx_interrupt(dev);
+ s6gmac_stats_interrupt(pd, 0);
+ s6gmac_stats_interrupt(pd, 1);
+ spin_unlock(&pd->lock);
+ return IRQ_HANDLED;
+}
+
+static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
+ u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
+{
+ writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
+ writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
+ writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
+ writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
+}
+
+static inline void s6gmac_stop_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ writel(0, pd->reg + S6_GMAC_MACCONF1);
+}
+
+static inline void s6gmac_init_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int is_rgmii = !!(pd->phydev->supported
+ & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
+#if 0
+ writel(1 << S6_GMAC_MACCONF1_SYNCTX |
+ 1 << S6_GMAC_MACCONF1_SYNCRX |
+ 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RESTXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESRXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
+ 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
+ pd->reg + S6_GMAC_MACCONF1);
+#endif
+ writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
+ udelay(1000);
+ writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(1 << S6_GMAC_MACCONF1_TXENA |
+ 1 << S6_GMAC_MACCONF1_RXENA |
+ (dev->flags & IFF_LOOPBACK ? 1 : 0)
+ << S6_GMAC_MACCONF1_LOOPBACK,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
+ dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
+ pd->reg + S6_GMAC_MACMAXFRAMELEN);
+ writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
+ 1 << S6_GMAC_MACCONF2_PADCRCENA |
+ 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
+ (pd->link.giga ?
+ S6_GMAC_MACCONF2_IFMODE_BYTE :
+ S6_GMAC_MACCONF2_IFMODE_NIBBLE)
+ << S6_GMAC_MACCONF2_IFMODE |
+ 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
+ pd->reg + S6_GMAC_MACCONF2);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
+ writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
+ 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_STFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
+ pd->reg + S6_GMAC_FIFOCONF0);
+ writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
+ 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
+ pd->reg + S6_GMAC_FIFOCONF3);
+ writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
+ 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_OK |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
+ 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
+ pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
+ 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
+ pd->reg + S6_GMAC_FIFOCONF5);
+ writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
+ pd->reg + S6_GMAC_FIFOCONF4);
+ s6gmac_set_dstaddr(pd, 0,
+ 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 1,
+ dev->dev_addr[5] |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[2] << 24,
+ dev->dev_addr[1] |
+ dev->dev_addr[0] << 8,
+ 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 2,
+ 0x00000000, 0x00000100, 0x00000000, 0x00000100);
+ s6gmac_set_dstaddr(pd, 3,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+}
+
+static void s6mii_enable(struct s6gmac *pd)
+{
+ writel(readl(pd->reg + S6_GMAC_MACCONF1) &
+ ~(1 << S6_GMAC_MACCONF1_SOFTRES),
+ pd->reg + S6_GMAC_MACCONF1);
+ writel((readl(pd->reg + S6_GMAC_MACMIICONF)
+ & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
+ | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
+ pd->reg + S6_GMAC_MACMIICONF);
+}
+
+static int s6mii_busy(struct s6gmac *pd, int tmo)
+{
+ while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
+ if (--tmo == 0)
+ return -ETIME;
+ udelay(64);
+ }
+ return 0;
+}
+
+static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
+ writel(0, pd->reg + S6_GMAC_MACMIICMD);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
+}
+
+static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(value, pd->reg + S6_GMAC_MACMIICTRL);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return 0;
+}
+
+static int s6mii_reset(struct mii_bus *bus)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
+ return -ETIME;
+ return 0;
+}
+
+static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
+{
+ u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
+ switch (pd->link.mbit) {
+ case 10:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 100:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 1000:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ default:
+ return;
+ }
+ writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+}
+
+static inline void s6gmac_linkisup(struct net_device *dev, int isup)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+
+ pd->link.full = phydev->duplex;
+ pd->link.giga = (phydev->speed == 1000);
+ if (pd->link.mbit != phydev->speed) {
+ pd->link.mbit = phydev->speed;
+ s6gmac_set_rgmii_txclock(pd);
+ }
+ pd->link.isup = isup;
+ if (isup)
+ netif_carrier_on(dev);
+ phy_print_status(phydev);
+}
+
+static void s6gmac_adjust_link(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+ if (pd->link.isup &&
+ (!phydev->link ||
+ (pd->link.mbit != phydev->speed) ||
+ (pd->link.full != phydev->duplex))) {
+ pd->link.isup = 0;
+ netif_tx_disable(dev);
+ if (!phydev->link) {
+ netif_carrier_off(dev);
+ phy_print_status(phydev);
+ }
+ }
+ if (!pd->link.isup && phydev->link) {
+ if (pd->link.full != phydev->duplex) {
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ if (phydev->duplex)
+ maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
+ else
+ maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (pd->link.giga != (phydev->speed == 1000)) {
+ u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
+ << S6_GMAC_MACCONF2_IFMODE);
+ if (phydev->speed == 1000) {
+ fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
+ << S6_GMAC_MACCONF2_IFMODE;
+ } else {
+ fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
+ << S6_GMAC_MACCONF2_IFMODE;
+ }
+ writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+ s6gmac_linkisup(dev, 1);
+ }
+}
+
+static inline int s6gmac_phy_start(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int i = 0;
+ struct phy_device *p = NULL;
+ while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR))
+ i++;
+ p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII);
+ if (IS_ERR(p)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(p);
+ }
+ p->supported &= PHY_GBIT_FEATURES;
+ p->advertising = p->supported;
+ pd->phydev = p;
+ return 0;
+}
+
+static inline void s6gmac_init_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 mask;
+ mask = 1 << S6_GMAC_STATCARRY1_RDRP |
+ 1 << S6_GMAC_STATCARRY1_RJBR |
+ 1 << S6_GMAC_STATCARRY1_RFRG |
+ 1 << S6_GMAC_STATCARRY1_ROVR |
+ 1 << S6_GMAC_STATCARRY1_RUND |
+ 1 << S6_GMAC_STATCARRY1_RCDE |
+ 1 << S6_GMAC_STATCARRY1_RFLR |
+ 1 << S6_GMAC_STATCARRY1_RALN |
+ 1 << S6_GMAC_STATCARRY1_RMCA |
+ 1 << S6_GMAC_STATCARRY1_RFCS |
+ 1 << S6_GMAC_STATCARRY1_RPKT |
+ 1 << S6_GMAC_STATCARRY1_RBYT;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
+ mask = 1 << S6_GMAC_STATCARRY2_TDRP |
+ 1 << S6_GMAC_STATCARRY2_TNCL |
+ 1 << S6_GMAC_STATCARRY2_TXCL |
+ 1 << S6_GMAC_STATCARRY2_TEDF |
+ 1 << S6_GMAC_STATCARRY2_TPKT |
+ 1 << S6_GMAC_STATCARRY2_TBYT |
+ 1 << S6_GMAC_STATCARRY2_TFRG |
+ 1 << S6_GMAC_STATCARRY2_TUND |
+ 1 << S6_GMAC_STATCARRY2_TOVR |
+ 1 << S6_GMAC_STATCARRY2_TFCS |
+ 1 << S6_GMAC_STATCARRY2_TJBR;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
+}
+
+static inline void s6gmac_init_dmac(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
+ s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
+ s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
+ s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
+}
+
+static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ dev->trans_start = jiffies;
+ writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
+ 0 << S6_GMAC_BURST_PREWR_CFE |
+ 1 << S6_GMAC_BURST_PREWR_PPE |
+ 1 << S6_GMAC_BURST_PREWR_FCS |
+ ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
+ pd->reg + S6_GMAC_BURST_PREWR);
+ s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
+ (u32)skb->data, pd->io, skb->len);
+ if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_stop_queue(dev);
+ if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
+ printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
+ pd->tx_skb_o, pd->tx_skb_i);
+ BUG();
+ }
+ pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static void s6gmac_tx_timeout(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_tx_interrupt(dev);
+ spin_unlock_irqrestore(&pd->lock, flags);
+}
+
+static int s6gmac_open(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ phy_read_status(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ pd->link.mbit = 0;
+ s6gmac_linkisup(dev, pd->phydev->link);
+ s6gmac_init_device(dev);
+ s6gmac_init_stats(dev);
+ s6gmac_init_dmac(dev);
+ s6gmac_rx_fillfifo(pd);
+ s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
+ 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
+ s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
+ 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
+ writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
+ 0 << S6_GMAC_HOST_INT_TXPREWOVER |
+ 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
+ pd->reg + S6_GMAC_HOST_INTMASK);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ phy_start(pd->phydev);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int s6gmac_stop(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ netif_stop_queue(dev);
+ phy_stop(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_init_dmac(dev);
+ s6gmac_stop_device(dev);
+ while (pd->tx_skb_i != pd->tx_skb_o)
+ dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ while (pd->rx_skb_i != pd->rx_skb_o)
+ dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static struct net_device_stats *s6gmac_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
+ int i;
+ do {
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
+ pd->stats[i] =
+ pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
+ s6gmac_stats_collect(pd, &statinf[0][0]);
+ s6gmac_stats_collect(pd, &statinf[1][0]);
+ i = s6gmac_stats_pending(pd, 0) |
+ s6gmac_stats_pending(pd, 1);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ } while (i);
+ st->rx_errors = st->rx_crc_errors +
+ st->rx_frame_errors +
+ st->rx_length_errors +
+ st->rx_missed_errors;
+ st->tx_errors += st->tx_aborted_errors;
+ return st;
+}
+
+static int __devinit s6gmac_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct s6gmac *pd;
+ int res;
+ unsigned long i;
+ struct mii_bus *mb;
+ dev = alloc_etherdev(sizeof(*pd));
+ if (!dev) {
+ printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
+ return -ENOMEM;
+ }
+ dev->open = s6gmac_open;
+ dev->stop = s6gmac_stop;
+ dev->hard_start_xmit = s6gmac_tx;
+ dev->tx_timeout = s6gmac_tx_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->get_stats = s6gmac_stats;
+ dev->irq = platform_get_irq(pdev, 0);
+ pd = netdev_priv(dev);
+ memset(pd, 0, sizeof(*pd));
+ spin_lock_init(&pd->lock);
+ pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
+ pd->tx_dma = DMA_MASK_DMAC(i);
+ pd->tx_chan = DMA_INDEX_CHNL(i);
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
+ pd->rx_dma = DMA_MASK_DMAC(i);
+ pd->rx_chan = DMA_INDEX_CHNL(i);
+ pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
+ goto errirq;
+ }
+ res = register_netdev(dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "error registering device %s\n",
+ dev->name);
+ goto errdev;
+ }
+ mb = mdiobus_alloc();
+ if (!mb) {
+ printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
+ goto errmii;
+ }
+ mb->name = "s6gmac_mii";
+ mb->read = s6mii_read;
+ mb->write = s6mii_write;
+ mb->reset = s6mii_reset;
+ mb->priv = pd;
+ snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+ mb->phy_mask = ~(1 << 0);
+ mb->irq = &pd->mii.irq[0];
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ int n = platform_get_irq(pdev, i + 1);
+ if (n < 0)
+ n = PHY_POLL;
+ pd->mii.irq[i] = n;
+ }
+ mdiobus_register(mb);
+ pd->mii.bus = mb;
+ res = s6gmac_phy_start(dev);
+ if (res)
+ return res;
+ platform_set_drvdata(pdev, dev);
+ return 0;
+errmii:
+ unregister_netdev(dev);
+errdev:
+ free_irq(dev->irq, dev);
+errirq:
+ free_netdev(dev);
+ return res;
+}
+
+static int __devexit s6gmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ if (dev) {
+ struct s6gmac *pd = netdev_priv(dev);
+ mdiobus_unregister(pd->mii.bus);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+static struct platform_driver s6gmac_driver = {
+ .probe = s6gmac_probe,
+ .remove = __devexit_p(s6gmac_remove),
+ .driver = {
+ .name = "s6gmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s6gmac_init(void)
+{
+ printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
+ return platform_driver_register(&s6gmac_driver);
+}
+
+
+static void __exit s6gmac_exit(void)
+{
+ platform_driver_unregister(&s6gmac_driver);
+}
+
+module_init(s6gmac_init);
+module_exit(s6gmac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
+MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 341882f959f3..a2d82ddb3b4d 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- u32 ioaddr, boguscnt = RX_RING_SIZE;
- u32 intr_status = 0;
+ u32 ioaddr, intr_status = 0;
ioaddr = ndev->base_addr;
spin_lock(&mdp->lock);
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & cd->eesr_err_check)
sh_eth_error(ndev, intr_status);
- if (--boguscnt < 0) {
- printk(KERN_WARNING
- "%s: Too much work at interrupt, status=0x%4.4x.\n",
- ndev->name, intr_status);
- }
-
other_irq:
spin_unlock(&mdp->lock);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7681d28c53d7..daf961ab68bc 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (likely(status >> 16 == (status & 0xffff))) {
skb = sky2->rx_ring[sky2->rx_next].skb;
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = status & 0xffff;
+ skb->csum = le16_to_cpu(status);
} else {
printk(KERN_NOTICE PFX "%s: hardware receive "
"checksum problem (status = %#x)\n",
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index b60639bd181b..66067f9d91c0 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1938,7 +1938,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start);
+ release_mem_region(res->start, resource_size(res));
iounmap(pdata->ioaddr);
@@ -1976,7 +1976,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = -ENODEV;
goto out_0;
}
- res_size = res->end - res->start + 1;
+ res_size = resource_size(res);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
@@ -2104,7 +2104,7 @@ out_unmap_io_3:
out_free_netdev_2:
free_netdev(dev);
out_release_io_1:
- release_mem_region(res->start, res->end - res->start);
+ release_mem_region(res->start, resource_size(res));
out_0:
return retval;
}
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 838cce8b8fff..669253c7bd41 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -180,7 +180,7 @@ static int full_duplex[MAX_UNITS] = {0, };
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
-KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 545f81b34ad7..d1521c3875b2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1698,13 +1698,13 @@ static int netdev_close(struct net_device *dev)
#ifdef __i386__
if (netif_msg_hw(np)) {
- printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ printk(KERN_DEBUG " Tx ring at %8.8x:\n",
(int)(np->tx_ring_dma));
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
+ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
np->tx_ring[i].frag[0].length);
- printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)(np->rx_ring_dma));
for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 0f78f99f9b20..7030bd5e9848 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1132,7 +1132,9 @@ static int tsi108_get_mac(struct net_device *dev)
}
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk("KERN_ERR: word1: %08x, word2: %08x\n", word1, word2);
+ printk(KERN_ERR
+ "%s: Invalid MAC address. word1: %08x, word2: %08x\n",
+ dev->name, word1, word2);
return -EINVAL;
}
@@ -1201,8 +1203,8 @@ static void tsi108_set_rx_mode(struct net_device *dev)
__set_bit(hash, &data->mc_hash[0]);
} else {
printk(KERN_ERR
- "%s: got multicast address of length %d "
- "instead of 6.\n", dev->name,
+ "%s: got multicast address of length %d instead of 6.\n",
+ dev->name,
mc->dmi_addrlen);
}
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 81f054dbb88d..ef49744a5085 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -944,9 +944,10 @@ static void de_set_media (struct de_private *de)
macmode &= ~FullDuplex;
if (netif_msg_link(de)) {
- printk(KERN_INFO "%s: set link %s\n"
- KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
- KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ printk(KERN_INFO
+ "%s: set link %s\n"
+ "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
+ "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
de->dev->name, media_name[media],
de->dev->name, dr32(MacMode), dr32(SIAStatus),
dr32(CSR13), dr32(CSR14), dr32(CSR15),
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 2abb5d3becc6..99a63649f4fc 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -570,16 +570,18 @@ static void tulip_tx_timeout(struct net_device *dev)
(unsigned int)tp->rx_ring[i].buffer2,
buf[0], buf[1], buf[2]);
for (j = 0; buf[j] != 0xee && j < 1600; j++)
- if (j < 100) printk(" %2.2x", buf[j]);
- printk(" j=%d.\n", j);
+ if (j < 100)
+ printk(KERN_CONT " %2.2x", buf[j]);
+ printk(KERN_CONT " j=%d.\n", j);
}
printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
- printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ printk(KERN_CONT " %8.8x",
+ (unsigned int)tp->rx_ring[i].status);
+ printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
- printk("\n");
+ printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status);
+ printk(KERN_CONT "\n");
}
#endif
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 842b1a2c40d4..0f15773dae52 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -142,7 +142,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static const char version[] __initconst =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) "
DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
- KERN_INFO " http://www.scyld.com/network/drivers.html\n";
+ " http://www.scyld.com/network/drivers.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
@@ -939,7 +939,7 @@ static void tx_timeout(struct net_device *dev)
printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
- printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
printk(" %8.8x", np->tx_ring[i].status);
printk("\n");
@@ -1520,7 +1520,7 @@ static int netdev_close(struct net_device *dev)
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
i, np->tx_ring[i].length,
np->tx_ring[i].status, np->tx_ring[i].buffer1);
- printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 11a0ba47b677..027f7aba26af 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -486,12 +486,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
- struct sock *sk = tun->sk;
+ struct sock *sk;
unsigned int mask = 0;
if (!tun)
return POLLERR;
+ sk = tun->sk;
+
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
poll_wait(file, &tun->socket.wait, wait);
@@ -1324,20 +1326,22 @@ static int tun_chr_close(struct inode *inode, struct file *file)
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
-
- rtnl_lock();
tun = __tun_get(tfile);
if (tun) {
- DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
+ struct net_device *dev = tun->dev;
+
+ DBG(KERN_INFO "%s: tun_chr_close\n", dev->name);
__tun_detach(tun);
/* If desirable, unregister the netdevice. */
- if (!(tun->flags & TUN_PERSIST))
- unregister_netdevice(tun->dev);
-
+ if (!(tun->flags & TUN_PERSIST)) {
+ rtnl_lock();
+ if (dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(dev);
+ rtnl_unlock();
+ }
}
- rtnl_unlock();
tun = tfile->tun;
if (tun)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3717569828bf..a906d3998131 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -169,10 +169,12 @@ config USB_NET_CDCETHER
The Linux-USB CDC Ethernet Gadget driver is an open implementation.
This driver should work with at least the following devices:
+ * Dell Wireless 5530 HSPA
* Ericsson PipeRider (all variants)
+ * Ericsson Mobile Broadband Module (all variants)
* Motorola (DM100 and SB4100)
* Broadcom Cable Modem (reference design)
- * Toshiba PCX1100U
+ * Toshiba (PCX1100U and F3507g)
* ...
This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 80e01778dd3b..cd35d50e46d4 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return crc == crc2;
if (unlikely(crc != crc2)) {
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
dev_kfree_skb_any(skb2);
} else
usbnet_skb_return(dev, skb2);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 01fd528306ec..4a6aff579403 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -533,6 +533,31 @@ static const struct usb_device_id products [] = {
USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3507g ver. 2 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3607gw */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3307 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Toshiba F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Dell F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index c66b9c324f54..ca39ace0b0eb 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -307,9 +307,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x1286, 0x8001), // "blob" bootloader
.driver_info = (unsigned long) &blob_info,
}, {
- // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
- // e.g. Gumstix, current OpenZaurus, ...
- USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
+ // Linux Ethernet/RNDIS gadget, mostly on PXA, second config
+ // e.g. Gumstix, current OpenZaurus, ... or anything else
+ // that just enables this gadget option.
+ USB_DEVICE (0x0525, 0xa4a2),
.driver_info = (unsigned long) &linuxdev_info,
},
#endif
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 7ae82446b93a..1d3730d6690f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (unlikely(status & 0xbf)) {
- if (status & 0x01) dev->stats.rx_fifo_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x20) dev->stats.rx_missed_errors++;
- if (status & 0x90) dev->stats.rx_length_errors++;
+ if (status & 0x01) dev->net->stats.rx_fifo_errors++;
+ if (status & 0x02) dev->net->stats.rx_crc_errors++;
+ if (status & 0x04) dev->net->stats.rx_frame_errors++;
+ if (status & 0x20) dev->net->stats.rx_missed_errors++;
+ if (status & 0x90) dev->net->stats.rx_length_errors++;
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 034e8a73ca6b..aeb1ab03a9ee 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
dbg("rx framesize %d range %d..%d mtu %d", skb->len,
net->hard_header_len, dev->hard_mtu, net->mtu);
#endif
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
nc_ensure_sync(dev);
return 0;
}
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
hdr_len = le16_to_cpup(&header->hdr_len);
packet_len = le16_to_cpup(&header->packet_len);
if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("packet too big, %d", packet_len);
nc_ensure_sync(dev);
return 0;
} else if (hdr_len < MIN_HEADER) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("header too short, %d", hdr_len);
nc_ensure_sync(dev);
return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((packet_len & 0x01) == 0) {
if (skb->data [packet_len] != PAD_BYTE) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad pad");
return 0;
}
skb_trim(skb, skb->len - 1);
}
if (skb->len != packet_len) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad packet len %d (expected %d)",
skb->len, packet_len);
nc_ensure_sync(dev);
return 0;
}
if (header->packet_id != get_unaligned(&trailer->packet_id)) {
- dev->stats.rx_fifo_errors++;
+ dev->net->stats.rx_fifo_errors++;
dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
le16_to_cpu(header->packet_id),
le16_to_cpu(trailer->packet_id));
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 2138535f2339..73acbd244aa1 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
- pegasus->dr.wValue = 0;
+ pegasus->dr.wValue = cpu_to_le16(0);
pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
pegasus->dr.wLength = cpu_to_le16(3);
pegasus->ctrl_urb->transfer_buffer_length = 3;
@@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
int i;
__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
int ret;
+ __le16 le_data = cpu_to_le16(data);
set_registers(pegasus, EpromOffset, 4, d);
enable_eprom_write(pegasus);
set_register(pegasus, EpromOffset, index);
- set_registers(pegasus, EpromData, 2, &data);
+ set_registers(pegasus, EpromData, 2, &le_data);
set_register(pegasus, EpromCtrl, EPROM_WRITE);
for (i = 0; i < REG_TIMEOUT; i++) {
@@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
static inline void disable_net_traffic(pegasus_t * pegasus)
{
- int tmp = 0;
+ __le16 tmp = cpu_to_le16(0);
- set_registers(pegasus, EthCtrl0, 2, &tmp);
+ set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
}
static inline void get_interrupt_interval(pegasus_t * pegasus)
{
- __u8 data[2];
+ u16 data;
+ u8 interval;
- read_eprom_word(pegasus, 4, (__u16 *) data);
+ read_eprom_word(pegasus, 4, &data);
+ interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
- if (data[1] < 0x80) {
+ if (interval < 0x80) {
if (netif_msg_timer(pegasus))
dev_info(&pegasus->intf->dev, "intr interval "
"changed from %ums to %ums\n",
- data[1], 0x80);
- data[1] = 0x80;
+ interval, 0x80);
+ interval = 0x80;
+ data = (data & 0x00FF) | ((u16)interval << 8);
#ifdef PEGASUS_WRITE_EEPROM
- write_eprom_word(pegasus, 4, *(__u16 *) data);
+ write_eprom_word(pegasus, 4, data);
#endif
}
}
- pegasus->intr_interval = data[1];
+ pegasus->intr_interval = interval;
}
static void set_carrier(struct net_device *net)
@@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct usb_device *udev)
/* Special quirk to keep the driver from handling the Belkin Bluetooth
* dongle which happens to have the same ID.
*/
- if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
+ if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
+ (udd->idProduct == cpu_to_le16(0x0121)) &&
(udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
(udd->bDeviceProtocol == 1))
return 1;
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1bf243ef950e..2232232b7989 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
|| skb->len < msg_len
|| (data_offset + data_len + 8) > msg_len)) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
le32_to_cpu(hdr->msg_type),
msg_len, data_offset, data_len, skb->len);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 89a91f8c22de..fe045896406b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(header & RX_STS_ES_)) {
if (netif_msg_rx_err(dev))
devdbg(dev, "Error header=0x%08x", header);
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_dropped++;
if (header & RX_STS_CRC_) {
- dev->stats.rx_crc_errors++;
+ dev->net->stats.rx_crc_errors++;
} else {
if (header & (RX_STS_TL_ | RX_STS_RF_))
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
if ((header & RX_STS_LE_) &&
(!(header & RX_STS_FT_)))
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_length_errors++;
}
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 22c0585a0319..edfd9e10ceba 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
int status;
skb->protocol = eth_type_trans (skb, dev->net);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
if (netif_msg_rx_status (dev))
devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
if (netif_msg_rx_err (dev))
devdbg (dev, "drop");
error:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
skb_queue_tail (&dev->done, skb);
}
}
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
case 0:
if (skb->len < dev->net->hard_header_len) {
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx length %d", skb->len);
}
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
* storm, recovering as needed.
*/
case -EPIPE:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
// FALLTHROUGH
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
case -EPROTO:
case -ETIME:
case -EILSEQ:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
/* data overrun ... flush fifo? */
case -EOVERFLOW:
- dev->stats.rx_over_errors++;
+ dev->net->stats.rx_over_errors++;
// FALLTHROUGH
default:
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx status %d", urb_status);
break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
if (netif_msg_ifdown (dev))
devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
- dev->stats.rx_packets, dev->stats.tx_packets,
- dev->stats.rx_errors, dev->stats.tx_errors
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors
);
// ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += entry->length;
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += entry->length;
} else {
- dev->stats.tx_errors++;
+ dev->net->stats.tx_errors++;
switch (urb->status) {
case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
devdbg (dev, "drop, code %d", retval);
drop:
retval = NET_XMIT_SUCCESS;
- dev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 87197dd9c788..1097c72e44d5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -208,11 +208,14 @@ rx_drop:
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
- struct veth_priv *priv = netdev_priv(dev);
- struct net_device_stats *dev_stats = &dev->stats;
- unsigned int cpu;
+ struct veth_priv *priv;
+ struct net_device_stats *dev_stats;
+ int cpu;
struct veth_net_stats *stats;
+ priv = netdev_priv(dev);
+ dev_stats = &dev->stats;
+
dev_stats->rx_packets = 0;
dev_stats->tx_packets = 0;
dev_stats->rx_bytes = 0;
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
dev_stats->tx_dropped = 0;
dev_stats->rx_dropped = 0;
- if (priv->stats)
- for_each_online_cpu(cpu) {
- stats = per_cpu_ptr(priv->stats, cpu);
+ for_each_online_cpu(cpu) {
+ stats = per_cpu_ptr(priv->stats, cpu);
- dev_stats->rx_packets += stats->rx_packets;
- dev_stats->tx_packets += stats->tx_packets;
- dev_stats->rx_bytes += stats->rx_bytes;
- dev_stats->tx_bytes += stats->tx_bytes;
- dev_stats->tx_dropped += stats->tx_dropped;
- dev_stats->rx_dropped += stats->rx_dropped;
- }
+ dev_stats->rx_packets += stats->rx_packets;
+ dev_stats->tx_packets += stats->tx_packets;
+ dev_stats->rx_bytes += stats->rx_bytes;
+ dev_stats->tx_bytes += stats->tx_bytes;
+ dev_stats->tx_dropped += stats->tx_dropped;
+ dev_stats->rx_dropped += stats->rx_dropped;
+ }
return dev_stats;
}
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev)
netif_carrier_off(dev);
netif_carrier_off(priv->peer);
- free_percpu(priv->stats);
- priv->stats = NULL;
return 0;
}
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev)
return 0;
}
+static void veth_dev_free(struct net_device *dev)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ free_percpu(priv->stats);
+ free_netdev(dev);
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev)
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
- dev->destructor = free_netdev;
+ dev->destructor = veth_dev_free;
}
/*
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index b02f7adff5dc..3ba35956327a 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1847,7 +1847,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
*/
if (tdinfo->skb_dma) {
- pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN);
+ pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 223238de475c..1ea1ef6c3b96 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -584,8 +584,9 @@ static void sca_dump_rings(struct net_device *dev)
sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+ printk(KERN_CONT "\n");
- printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+ printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
"last=%u %sactive",
sca_inw(get_dmac_tx(port) + CDAL, card),
sca_inw(get_dmac_tx(port) + EDAL, card),
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 497b003d7239..f099c34a3ae2 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -529,8 +529,9 @@ static void sca_dump_rings(struct net_device *dev)
sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+ printk(KERN_CONT "\n");
- printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+ printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
"last=%u %sactive",
sca_inl(get_dmac_tx(port) + CDAL, card),
sca_inl(get_dmac_tx(port) + EDAL, card),
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 3fb9dbc88a1a..d14e95a08d66 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -326,11 +326,9 @@ sbni_pci_probe( struct net_device *dev )
}
if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
- printk( KERN_WARNING " WARNING: The PCI BIOS assigned "
- "this PCI card to IRQ %d, which is unlikely "
- "to work!.\n"
- KERN_WARNING " You should use the PCI BIOS "
- "setup to assign a valid IRQ line.\n",
+ printk( KERN_WARNING
+ " WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n"
+ " You should use the PCI BIOS setup to assign a valid IRQ line.\n",
pci_irq_line );
/* avoiding re-enable dual adapters */
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d26e7b485315..eb0337c49546 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,5 +1,6 @@
config ATH_COMMON
tristate "Atheros Wireless Cards"
+ depends on WLAN_80211
depends on ATH5K || ATH9K || AR9170_USB
source "drivers/net/wireless/ath/ath5k/Kconfig"
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 55f7de09d134..ea045151f953 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -538,6 +538,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
sc->iobase = mem; /* So we can unmap it on detach */
sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
sc->opmode = NL80211_IFTYPE_STATION;
+ sc->bintval = 1000;
mutex_init(&sc->lock);
spin_lock_init(&sc->rxbuflock);
spin_lock_init(&sc->txbuflock);
@@ -686,6 +687,13 @@ ath5k_pci_resume(struct pci_dev *pdev)
if (err)
return err;
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
if (err) {
ATH5K_ERR(sc, "request_irq failed\n");
@@ -2748,9 +2756,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
- /* Set to a reasonable value. Note that this will
- * be set to mac80211's value at ath5k_config(). */
- sc->bintval = 1000;
ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
ret = 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9f49a3251d4d..66a6c1f5022a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1196,8 +1196,8 @@ void ath_radio_disable(struct ath_softc *sc)
ath9k_hw_phy_disable(ah);
ath9k_hw_configpcipowersave(ah, 1);
- ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
ath9k_ps_restore(sc);
+ ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}
/*******************/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index ccdf20a2e9be..170c5b32e49b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
+ u32 val;
int ret = 0;
struct ath_hw *ah;
@@ -133,6 +134,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
ret = pci_request_region(pdev, 0, "ath9k");
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -239,12 +248,21 @@ static int ath_pci_resume(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ u32 val;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_restore_state(pdev);
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* Enable LED */
ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index f99f3a76df3f..cece1c4c6bda 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -539,11 +539,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (ath_beacon_dtim_pending_cab(skb)) {
/*
* Remain awake waiting for buffered broadcast/multicast
- * frames.
+ * frames. If the last broadcast/multicast frame is not
+ * received properly, the next beacon frame will work as
+ * a backup trigger for returning into NETWORK SLEEP state,
+ * so we are waiting for it as well.
*/
DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB;
+ sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b61a071788a5..4ccf48e396df 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -355,7 +355,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
if (bf_next == NULL) {
- INIT_LIST_HEAD(&bf_head);
+ /*
+ * Make sure the last desc is reclaimed if it
+ * not a holding desc.
+ */
+ if (!bf_last->bf_stale)
+ list_move_tail(&bf->list, &bf_head);
+ else
+ INIT_LIST_HEAD(&bf_head);
} else {
ASSERT(!list_empty(bf_q));
list_move_tail(&bf->list, &bf_head);
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index f580c2812d91..40448067e4cc 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -648,6 +648,7 @@ struct b43_wl {
u8 nr_devs;
bool radiotap_enabled;
+ bool radio_enabled;
/* The beacon we are currently using (AP or IBSS mode).
* This beacon stuff is protected by the irq_lock. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6456afebdba1..e71c8d9cd706 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3497,8 +3497,8 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
if (phy->ops->set_rx_antenna)
phy->ops->set_rx_antenna(dev, antenna);
- if (!!conf->radio_enabled != phy->radio_on) {
- if (conf->radio_enabled) {
+ if (wl->radio_enabled != phy->radio_on) {
+ if (wl->radio_enabled) {
b43_software_rfkill(dev, false);
b43info(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable) {
@@ -4339,6 +4339,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
wl->beacon0_uploaded = 0;
wl->beacon1_uploaded = 0;
wl->beacon_templates_virgin = 1;
+ wl->radio_enabled = 1;
mutex_lock(&wl->mutex);
@@ -4378,6 +4379,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
if (b43_status(dev) >= B43_STAT_STARTED)
b43_wireless_core_stop(dev);
b43_wireless_core_exit(dev);
+ wl->radio_enabled = 0;
mutex_unlock(&wl->mutex);
cancel_work_sync(&(wl->txpower_adjust_work));
@@ -4560,6 +4562,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
B43_WARN_ON(1);
dev->phy.gmode = have_2ghz_phy;
+ dev->phy.radio_on = 1;
tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
b43_wireless_core_reset(dev, tmp);
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 3cfc30307a27..6c3a74964ab8 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -35,6 +35,7 @@
static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = {
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
+ PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 77fda148ac46..038baa8869e2 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -607,6 +607,7 @@ struct b43legacy_wl {
u8 nr_devs;
bool radiotap_enabled;
+ bool radio_enabled;
/* The beacon we are currently using (AP or IBSS mode).
* This beacon stuff is protected by the irq_lock. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index e5136fb65ddd..c4973c1942bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2689,8 +2689,8 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
/* Antennas for RX and management frame TX. */
b43legacy_mgmtframe_txantenna(dev, antenna_tx);
- if (!!conf->radio_enabled != phy->radio_on) {
- if (conf->radio_enabled) {
+ if (wl->radio_enabled != phy->radio_on) {
+ if (wl->radio_enabled) {
b43legacy_radio_turn_on(dev);
b43legacyinfo(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable)
@@ -3441,6 +3441,7 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
wl->beacon0_uploaded = 0;
wl->beacon1_uploaded = 0;
wl->beacon_templates_virgin = 1;
+ wl->radio_enabled = 1;
mutex_lock(&wl->mutex);
@@ -3479,6 +3480,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
+ wl->radio_enabled = 0;
mutex_unlock(&wl->mutex);
}
@@ -3620,6 +3622,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
have_bphy = 1;
dev->phy.gmode = (have_gphy || have_bphy);
+ dev->phy.radio_on = 1;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 1eccb6df46dd..030401d367d3 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -4,6 +4,15 @@ config IWM
depends on CFG80211
select WIRELESS_EXT
select FW_LOADER
+ help
+ The Intel Wireless Multicomm 3200 hardware is a combo
+ card with GPS, Bluetooth, WiMax and 802.11 radios. It
+ runs over SDIO and is typically found on Moorestown
+ based platforms. This driver takes care of the 802.11
+ part, which is a fullmac one.
+
+ If you choose to build it as a module, it'll be called
+ iwmc3200wifi.ko.
config IWM_DEBUG
bool "Enable full debugging output in iwmc3200wifi"
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 635c16ee6186..77c339f8516c 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -288,6 +288,7 @@ struct iwm_priv {
u8 *eeprom;
struct timer_list watchdog;
struct work_struct reset_worker;
+ struct mutex mutex;
struct rfkill *rfkill;
char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -315,8 +316,11 @@ extern const struct iw_handler_def iwm_iw_handler_def;
void *iwm_if_alloc(int sizeof_bus, struct device *dev,
struct iwm_if_ops *if_ops);
void iwm_if_free(struct iwm_priv *iwm);
+int iwm_if_add(struct iwm_priv *iwm);
+void iwm_if_remove(struct iwm_priv *iwm);
int iwm_mode_to_nl80211_iftype(int mode);
int iwm_priv_init(struct iwm_priv *iwm);
+void iwm_priv_deinit(struct iwm_priv *iwm);
void iwm_reset(struct iwm_priv *iwm);
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
struct iwm_umac_notif_alive *alive);
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 6a2640f16b6d..8be206d58222 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -112,6 +112,9 @@ static void iwm_statistics_request(struct work_struct *work)
iwm_send_umac_stats_req(iwm, 0);
}
+int __iwm_up(struct iwm_priv *iwm);
+int __iwm_down(struct iwm_priv *iwm);
+
static void iwm_reset_worker(struct work_struct *work)
{
struct iwm_priv *iwm;
@@ -120,6 +123,19 @@ static void iwm_reset_worker(struct work_struct *work)
iwm = container_of(work, struct iwm_priv, reset_worker);
+ /*
+ * XXX: The iwm->mutex is introduced purely for this reset work,
+ * because the only other users of iwm_up and iwm_down are the netdev
+ * ndo_open and ndo_stop callbacks, which are already protected by rtnl.
+ * Please remove iwm->mutex as well if iwm_reset_worker() is no longer
+ * required in the future.
+ */
+ if (!mutex_trylock(&iwm->mutex)) {
+ IWM_WARN(iwm, "We are in the middle of bringing the interface "
+ "UP/DOWN. Skipping driver reset.\n");
+ return;
+ }
+
if (iwm->umac_profile_active) {
profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
if (profile)
@@ -128,10 +144,10 @@ static void iwm_reset_worker(struct work_struct *work)
IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
}
- iwm_down(iwm);
+ __iwm_down(iwm);
while (retry++ < 3) {
- ret = iwm_up(iwm);
+ ret = __iwm_up(iwm);
if (!ret)
break;
@@ -142,7 +158,7 @@ static void iwm_reset_worker(struct work_struct *work)
IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
kfree(profile);
- return;
+ goto out;
}
if (profile) {
@@ -151,6 +167,9 @@ static void iwm_reset_worker(struct work_struct *work)
iwm_send_mlme_profile(iwm);
kfree(profile);
}
+
+ out:
+ mutex_unlock(&iwm->mutex);
}
static void iwm_watchdog(unsigned long data)
@@ -215,10 +234,21 @@ int iwm_priv_init(struct iwm_priv *iwm)
init_timer(&iwm->watchdog);
iwm->watchdog.function = iwm_watchdog;
iwm->watchdog.data = (unsigned long)iwm;
+ mutex_init(&iwm->mutex);
return 0;
}
+void iwm_priv_deinit(struct iwm_priv *iwm)
+{
+ int i;
+
+ for (i = 0; i < IWM_TX_QUEUES; i++)
+ destroy_workqueue(iwm->txq[i].wq);
+
+ destroy_workqueue(iwm->rx_wq);
+}
+
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@@ -466,7 +496,7 @@ void iwm_link_off(struct iwm_priv *iwm)
iwm_rx_free(iwm);
- cancel_delayed_work(&iwm->stats_request);
+ cancel_delayed_work_sync(&iwm->stats_request);
memset(wstats, 0, sizeof(struct iw_statistics));
wstats->qual.updated = IW_QUAL_ALL_INVALID;
@@ -511,7 +541,7 @@ static int iwm_channels_init(struct iwm_priv *iwm)
return 0;
}
-int iwm_up(struct iwm_priv *iwm)
+int __iwm_up(struct iwm_priv *iwm)
{
int ret;
struct iwm_notif *notif_reboot, *notif_ack = NULL;
@@ -647,7 +677,18 @@ int iwm_up(struct iwm_priv *iwm)
return -EIO;
}
-int iwm_down(struct iwm_priv *iwm)
+int iwm_up(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_up(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
+
+int __iwm_down(struct iwm_priv *iwm)
{
int ret;
@@ -678,3 +719,14 @@ int iwm_down(struct iwm_priv *iwm)
return 0;
}
+
+int iwm_down(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_down(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 68e2c3b6c7a1..aaa20c6885c8 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -114,32 +114,31 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
iwm = wdev_to_iwm(wdev);
iwm->bus_ops = if_ops;
iwm->wdev = wdev;
- iwm_priv_init(iwm);
+
+ ret = iwm_priv_init(iwm);
+ if (ret) {
+ dev_err(dev, "failed to init iwm_priv\n");
+ goto out_wdev;
+ }
+
wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
- ndev = alloc_netdev_mq(0, "wlan%d", ether_setup,
- IWM_TX_QUEUES);
+ ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
if (!ndev) {
dev_err(dev, "no memory for network device instance\n");
- goto out_wdev;
+ goto out_priv;
}
ndev->netdev_ops = &iwm_netdev_ops;
ndev->wireless_handlers = &iwm_iw_handler_def;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- ret = register_netdev(ndev);
- if (ret < 0) {
- dev_err(dev, "Failed to register netdev: %d\n", ret);
- goto out_ndev;
- }
-
wdev->netdev = ndev;
return iwm;
- out_ndev:
- free_netdev(ndev);
+ out_priv:
+ iwm_priv_deinit(iwm);
out_wdev:
iwm_wdev_free(iwm);
@@ -148,15 +147,29 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
void iwm_if_free(struct iwm_priv *iwm)
{
- int i;
-
if (!iwm_to_ndev(iwm))
return;
- unregister_netdev(iwm_to_ndev(iwm));
free_netdev(iwm_to_ndev(iwm));
iwm_wdev_free(iwm);
- destroy_workqueue(iwm->rx_wq);
- for (i = 0; i < IWM_TX_QUEUES; i++)
- destroy_workqueue(iwm->txq[i].wq);
+ iwm_priv_deinit(iwm);
+}
+
+int iwm_if_add(struct iwm_priv *iwm)
+{
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ int ret;
+
+ ret = register_netdev(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwm_if_remove(struct iwm_priv *iwm)
+{
+ unregister_netdev(iwm_to_ndev(iwm));
}
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index b54da677b371..916681837fd2 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -454,10 +454,18 @@ static int iwm_sdio_probe(struct sdio_func *func,
INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
+ ret = iwm_if_add(iwm);
+ if (ret) {
+ dev_err(dev, "add SDIO interface failed\n");
+ goto destroy_wq;
+ }
+
dev_info(dev, "IWM SDIO probe\n");
return 0;
+ destroy_wq:
+ destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
if_free:
@@ -471,9 +479,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
struct iwm_priv *iwm = hw_to_iwm(hw);
struct device *dev = &func->dev;
+ iwm_if_remove(iwm);
+ destroy_workqueue(hw->isr_wq);
iwm_debugfs_exit(iwm);
iwm_if_free(iwm);
- destroy_workqueue(hw->isr_wq);
sdio_set_drvdata(func, NULL);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e789c6e9938c..a111bda392e2 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -418,6 +418,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
continue;
if (!data2->started || !hwsim_ps_rx_ok(data2, skb) ||
+ !data->channel || !data2->channel ||
data->channel->center_freq != data2->channel->center_freq ||
!(data->group & data2->group))
continue;
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index b618bd14583f..22ca122bd798 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -823,30 +823,30 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_tx_info *range;
unsigned long flags;
- if (unlikely(!skb || !dev || skb_queue_empty(&priv->tx_queue)))
+ if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue)))
return;
- /* There used to be a check here to see if the SKB was on the
- * TX queue or not. This can never happen because all SKBs we
- * see here successfully went through p54_assign_address()
- * which means the SKB is on the ->tx_queue.
+ /*
+ * don't try to free an already unlinked skb
*/
+ if (unlikely((!skb->next) || (!skb->prev)))
+ return;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
info = IEEE80211_SKB_CB(skb);
range = (void *)info->rate_driver_data;
- if (!skb_queue_is_first(&priv->tx_queue, skb)) {
+ if (skb->prev != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
- ni = IEEE80211_SKB_CB(skb_queue_prev(&priv->tx_queue, skb));
+ ni = IEEE80211_SKB_CB(skb->prev);
mr = (struct p54_tx_info *)ni->rate_driver_data;
}
- if (!skb_queue_is_last(&priv->tx_queue, skb)) {
+ if (skb->next != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
- ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, skb));
+ ni = IEEE80211_SKB_CB(skb->next);
mr = (struct p54_tx_info *)ni->rate_driver_data;
}
__skb_unlink(skb, &priv->tx_queue);
@@ -864,13 +864,15 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
unsigned long flags;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
- skb_queue_walk(&priv->tx_queue, entry) {
+ entry = priv->tx_queue.next;
+ while (entry != (struct sk_buff *)&priv->tx_queue) {
struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
if (hdr->req_id == req_id) {
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return entry;
}
+ entry = entry->next;
}
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return NULL;
@@ -888,33 +890,36 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
int count, idx;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
- skb_queue_walk(&priv->tx_queue, entry) {
+ entry = (struct sk_buff *) priv->tx_queue.next;
+ while (entry != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
struct p54_hdr *entry_hdr;
struct p54_tx_data *entry_data;
unsigned int pad = 0, frame_len;
range = (void *)info->rate_driver_data;
- if (range->start_addr != addr)
+ if (range->start_addr != addr) {
+ entry = entry->next;
continue;
+ }
- if (!skb_queue_is_last(&priv->tx_queue, entry)) {
+ if (entry->next != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
- ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue,
- entry));
+ ni = IEEE80211_SKB_CB(entry->next);
mr = (struct p54_tx_info *)ni->rate_driver_data;
}
__skb_unlink(entry, &priv->tx_queue);
- spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
frame_len = entry->len;
entry_hdr = (struct p54_hdr *) entry->data;
entry_data = (struct p54_tx_data *) entry_hdr->data;
- priv->tx_stats[entry_data->hw_queue].len--;
+ if (priv->tx_stats[entry_data->hw_queue].len)
+ priv->tx_stats[entry_data->hw_queue].len--;
priv->stats.dot11ACKFailureCount += payload->tries - 1;
+ spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
/*
* Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
@@ -1164,21 +1169,23 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
}
}
- skb_queue_walk(&priv->tx_queue, entry) {
+ entry = priv->tx_queue.next;
+ while (left--) {
u32 hole_size;
info = IEEE80211_SKB_CB(entry);
range = (void *)info->rate_driver_data;
hole_size = range->start_addr - last_addr;
if (!target_skb && hole_size >= len) {
- target_skb = skb_queue_prev(&priv->tx_queue, entry);
+ target_skb = entry->prev;
hole_size -= len;
target_addr = last_addr;
}
largest_hole = max(largest_hole, hole_size);
last_addr = range->end_addr;
+ entry = entry->next;
}
if (!target_skb && priv->rx_end - last_addr >= len) {
- target_skb = skb_peek_tail(&priv->tx_queue);
+ target_skb = priv->tx_queue.prev;
largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
if (!skb_queue_empty(&priv->tx_queue)) {
info = IEEE80211_SKB_CB(target_skb);
@@ -2084,6 +2091,7 @@ out:
static void p54_stop(struct ieee80211_hw *dev)
{
struct p54_common *priv = dev->priv;
+ struct sk_buff *skb;
mutex_lock(&priv->conf_mutex);
priv->mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2098,7 +2106,8 @@ static void p54_stop(struct ieee80211_hw *dev)
p54_tx_cancel(dev, priv->cached_beacon);
priv->stop(dev);
- skb_queue_purge(&priv->tx_queue);
+ while ((skb = skb_dequeue(&priv->tx_queue)))
+ kfree_skb(skb);
priv->cached_beacon = NULL;
priv->tsf_high32 = priv->tsf_low32 = 0;
mutex_unlock(&priv->conf_mutex);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index b10b0383dfa5..698b11b1cadb 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2427,11 +2427,10 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
#ifdef PCMCIA_DEBUG
if (pc_debug > 3) {
- int i;
- printk(KERN_DEBUG "skb->data before untranslate");
- for (i = 0; i < 64; i++)
- printk("%02x ", skb->data[i]);
- printk("\n" KERN_DEBUG
+ print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ skb->data, 64, true);
+ printk(KERN_DEBUG
"type = %08x, xsap = %02x%02x%02x, org = %02x02x02x\n",
ntohs(type), psnap->dsap, psnap->ssap, psnap->ctrl,
psnap->org[0], psnap->org[1], psnap->org[2]);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 6af706408ac0..c6d300666ad8 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -3556,17 +3556,8 @@ wv_82593_config(struct net_device * dev)
cfblk.rcvstop = TRUE; /* Enable Receive Stop Register */
#ifdef DEBUG_I82593_SHOW
- {
- u_char *c = (u_char *) &cfblk;
- int i;
- printk(KERN_DEBUG "wavelan_cs: config block:");
- for(i = 0; i < sizeof(struct i82593_conf_block); i++,c++)
- {
- if((i % 16) == 0) printk("\n" KERN_DEBUG);
- printk("%02x ", *c);
- }
- printk("\n");
- }
+ print_hex_dump(KERN_DEBUG, "wavelan_cs: config block: ", DUMP_PREFIX_NONE,
+ 16, 1, &cfblk, sizeof(struct i82593_conf_block), false);
#endif
/* Copy the config block to the i82593 */
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index f0e5e943f6e3..0e6e44689cc6 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -38,7 +38,6 @@ static struct usb_device_id usb_ids[] = {
/* ZD1211 */
{ USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
- { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
@@ -61,12 +60,14 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
+ { USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
@@ -86,6 +87,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 3c7a5053f1da..a07580138e81 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -109,7 +109,7 @@ static int gx_fix;
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
- KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
+ " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
@@ -700,12 +700,15 @@ static void yellowfin_tx_timeout(struct net_device *dev)
int i;
printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", yp->rx_ring[i].result_status);
- printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring);
+ printk(KERN_CONT " %8.8x",
+ yp->rx_ring[i].result_status);
+ printk(KERN_CONT "\n");
+ printk(KERN_WARNING" Tx ring %p: ", yp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
- yp->tx_ring[i].result_status);
- printk("\n");
+ printk(KERN_CONT " %4.4x /%8.8x",
+ yp->tx_status[i].tx_errs,
+ yp->tx_ring[i].result_status);
+ printk(KERN_CONT "\n");
}
/* If the hardware is found to hang regularly, we will update the code
@@ -1216,20 +1219,20 @@ static int yellowfin_close(struct net_device *dev)
#if defined(__i386__)
if (yellowfin_debug > 2) {
- printk("\n"KERN_DEBUG" Tx ring at %8.8llx:\n",
+ printk(KERN_DEBUG" Tx ring at %8.8llx:\n",
(unsigned long long)yp->tx_ring_dma);
for (i = 0; i < TX_RING_SIZE*2; i++)
- printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
+ printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n",
i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
- printk("\n"KERN_DEBUG " Rx ring %8.8llx:\n",
+ printk(KERN_DEBUG " Rx ring %8.8llx:\n",
(unsigned long long)yp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index e1f6ce03705e..3c2270a8300c 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -33,6 +33,7 @@ void oprofile_reset_stats(void)
atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
atomic_set(&oprofile_stats.event_lost_overflow, 0);
+ atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
}
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 5d610cbcfe80..0f0e0b919ef4 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1134,7 +1134,7 @@ static const struct file_operations ccio_proc_bitmap_fops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif
+#endif /* CONFIG_PROC_FS */
/**
* ccio_find_ioc - Find the ioc in the ioc_list
@@ -1568,14 +1568,15 @@ static int __init ccio_probe(struct parisc_device *dev)
/* if this fails, no I/O cards will work, so may as well bug */
BUG_ON(dev->dev.platform_data == NULL);
HBA_DATA(dev->dev.platform_data)->iommu = ioc;
-
+
+#ifdef CONFIG_PROC_FS
if (ioc_count == 0) {
proc_create(MODULE_NAME, 0, proc_runway_root,
&ccio_proc_info_fops);
proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
&ccio_proc_bitmap_fops);
}
-
+#endif
ioc_count++;
parisc_has_iommu();
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 52ae0b1d470c..c590974e9815 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -353,7 +353,7 @@ static unsigned int dino_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type dino_interrupt_type = {
+static struct irq_chip dino_interrupt_type = {
.typename = "GSC-PCI",
.startup = dino_startup_irq,
.shutdown = dino_disable_irq,
@@ -1019,22 +1019,22 @@ static int __init dino_probe(struct parisc_device *dev)
** It's not used to avoid chicken/egg problems
** with configuration accessor functions.
*/
- bus = pci_scan_bus_parented(&dev->dev, dino_current_bus,
- &dino_cfg_ops, NULL);
+ dino_dev->hba.hba_bus = bus = pci_scan_bus_parented(&dev->dev,
+ dino_current_bus, &dino_cfg_ops, NULL);
+
if(bus) {
- pci_bus_add_devices(bus);
/* This code *depends* on scanning being single threaded
* if it isn't, this global bus number count will fail
*/
dino_current_bus = bus->subordinate + 1;
pci_bus_assign_resources(bus);
+ pci_bus_add_devices(bus);
} else {
- printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (probably duplicate bus number %d)\n",
+ printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n",
dev_name(&dev->dev), dino_current_bus);
/* increment the bus number in case of duplicates */
dino_current_bus++;
}
- dino_dev->hba.hba_bus = bus;
return 0;
}
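Illustrative sketch (not part of this patch): the dino hunk above defers pci_bus_add_devices() until after resource assignment, so drivers only bind to devices whose BARs are already settled. A minimal host-bridge bring-up under that ordering, with made-up names:

	#include <linux/pci.h>

	/* Sketch only: scan, assign resources, then expose devices to drivers. */
	static void foo_bring_up_root_bus(struct device *parent, int busnr,
					  struct pci_ops *ops)
	{
		struct pci_bus *bus;

		bus = pci_scan_bus_parented(parent, busnr, ops, NULL);
		if (!bus)
			return;			/* most likely a duplicate bus number */

		pci_bus_assign_resources(bus);	/* settle BARs and windows first... */
		pci_bus_add_devices(bus);	/* ...then let drivers bind */
	}
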
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 5b89f404e668..51220749cb65 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -188,7 +188,7 @@ static unsigned int eisa_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type eisa_interrupt_type = {
+static struct irq_chip eisa_interrupt_type = {
.typename = "EISA",
.startup = eisa_startup_irq,
.shutdown = eisa_disable_irq,
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index c709ecc2b7f7..0be1d50645ab 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -101,7 +101,7 @@ static int configure_memory(const unsigned char *buf,
printk("memory %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
result = request_resource(mem_parent, res);
if (result < 0) {
- printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
+ printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
return result;
}
}
@@ -191,7 +191,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent,
printk("ioports %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
result = request_resource(io_parent, res);
if (result < 0) {
- printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
+ printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
return result;
}
}
@@ -224,7 +224,7 @@ static int configure_port_init(const unsigned char *buf)
case HPEE_PORT_INIT_WIDTH_BYTE:
s=1;
if (c & HPEE_PORT_INIT_MASK) {
- printk("\n" KERN_WARNING "port_init: unverified mask attribute\n");
+ printk(KERN_WARNING "port_init: unverified mask attribute\n");
outb((inb(get_16(buf+len+1) &
get_8(buf+len+3)) |
get_8(buf+len+4)), get_16(buf+len+1));
@@ -249,7 +249,7 @@ static int configure_port_init(const unsigned char *buf)
case HPEE_PORT_INIT_WIDTH_DWORD:
s=4;
if (c & HPEE_PORT_INIT_MASK) {
- printk("\n" KERN_WARNING "port_init: unverified mask attribute\n");
+ printk(KERN_WARNING "port_init: unverified mask attribute\n");
outl((inl(get_16(buf+len+1) &
get_32(buf+len+3)) |
get_32(buf+len+7)), get_16(buf+len+1));
@@ -259,7 +259,7 @@ static int configure_port_init(const unsigned char *buf)
break;
default:
- printk("\n" KERN_ERR "Invalid port init word %02x\n", c);
+ printk(KERN_ERR "Invalid port init word %02x\n", c);
return 0;
}
@@ -297,7 +297,7 @@ static int configure_type_string(const unsigned char *buf)
/* just skip past the type field */
len = get_8(buf);
if (len > 80) {
- printk("\n" KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len);
+ printk(KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len);
}
return 1+len;
@@ -398,7 +398,7 @@ static int parse_slot_config(int slot,
}
if (p0 + function_len < pos) {
- printk("\n" KERN_ERR "eisa_enumerator: function %d length mis-match "
+ printk(KERN_ERR "eisa_enumerator: function %d length mis-match "
"got %d, expected %d\n",
num_func, pos-p0, function_len);
res=-1;
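Illustrative sketch (not part of this patch): the yellowfin and EISA-enumerator hunks above follow the printk convention that a KERN_* level marker is only meaningful at the start of a message; pieces of the same line are continued with KERN_CONT rather than by embedding "\n" KERN_xxx in the format string. A made-up ring dump in that style:

	#include <linux/types.h>
	#include <linux/kernel.h>

	/* Sketch only: one logical log line built from several printk() calls. */
	static void foo_dump_ring(const u32 *ring, int n)
	{
		int i;

		printk(KERN_DEBUG "foo: ring:");
		for (i = 0; i < n; i++)
			printk(KERN_CONT " %08x", ring[i]);
		printk(KERN_CONT "\n");
	}
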
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index d33632917696..647adc9f85ad 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -148,7 +148,7 @@ static unsigned int gsc_asic_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type gsc_asic_interrupt_type = {
+static struct irq_chip gsc_asic_interrupt_type = {
.typename = "GSC-ASIC",
.startup = gsc_asic_startup_irq,
.shutdown = gsc_asic_disable_irq,
@@ -158,7 +158,7 @@ static struct hw_interrupt_type gsc_asic_interrupt_type = {
.end = no_end_irq,
};
-int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
+int gsc_assign_irq(struct irq_chip *type, void *data)
{
static int irq = GSC_IRQ_BASE;
struct irq_desc *desc;
diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h
index 762a1babad60..b9d7bfb68e24 100644
--- a/drivers/parisc/gsc.h
+++ b/drivers/parisc/gsc.h
@@ -38,7 +38,7 @@ struct gsc_asic {
int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic);
int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */
int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */
-int gsc_assign_irq(struct hw_interrupt_type *type, void *data);
+int gsc_assign_irq(struct irq_chip *type, void *data);
int gsc_find_local_irq(unsigned int irq, int *global_irq, int limit);
void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl,
void (*choose)(struct parisc_device *child, void *ctrl));
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 4a9cc92d4d18..88e333553212 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -729,7 +729,7 @@ static int iosapic_set_affinity_irq(unsigned int irq,
}
#endif
-static struct hw_interrupt_type iosapic_interrupt_type = {
+static struct irq_chip iosapic_interrupt_type = {
.typename = "IO-SAPIC-level",
.startup = iosapic_startup_irq,
.shutdown = iosapic_disable_irq,
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 59fbbf128365..ede614616f8e 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -980,28 +980,38 @@ static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
unsigned long bytecnt;
- pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */
- pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */
long io_count;
long status; /* PDC return status */
long pa_count;
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; /* PA_VIEW */
+ pdc_pat_cell_mod_maddr_block_t *io_pdc_cell; /* IO_VIEW */
int i;
+ pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ return;
+
+ io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
+ if (!io_pdc_cell) {
+ kfree(pa_pdc_cell);
+ return;
+ }
+
/* return cell module (IO view) */
status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
- PA_VIEW, & pa_pdc_cell);
- pa_count = pa_pdc_cell.mod[1];
+ PA_VIEW, pa_pdc_cell);
+ pa_count = pa_pdc_cell->mod[1];
status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
- IO_VIEW, &io_pdc_cell);
- io_count = io_pdc_cell.mod[1];
+ IO_VIEW, io_pdc_cell);
+ io_count = io_pdc_cell->mod[1];
/* We've already done this once for device discovery...*/
if (status != PDC_OK) {
panic("pdc_pat_cell_module() call failed for LBA!\n");
}
- if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) {
+ if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
}
@@ -1016,8 +1026,8 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
} *p, *io;
struct resource *r;
- p = (void *) &(pa_pdc_cell.mod[2+i*3]);
- io = (void *) &(io_pdc_cell.mod[2+i*3]);
+ p = (void *) &(pa_pdc_cell->mod[2+i*3]);
+ io = (void *) &(io_pdc_cell->mod[2+i*3]);
/* Convert the PAT range data to PCI "struct resource" */
switch(p->type & 0xff) {
@@ -1096,6 +1106,9 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
break;
}
}
+
+ kfree(pa_pdc_cell);
+ kfree(io_pdc_cell);
}
#else
/* keep compiler from complaining about missing declarations */
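Illustrative sketch (not part of this patch): the lba_pat_resources() change above moves two large pdc_pat_cell_mod_maddr_block_t blocks off the kernel stack onto the heap. The shape is: allocate, check the pointer you just allocated, and free both blocks on every exit path. Generic-name version (struct big_blk stands in for the PDC block):

	#include <linux/slab.h>

	struct big_blk { unsigned long mod[32]; };	/* stand-in, sketch only */

	static void foo_use_two_blocks(void)
	{
		struct big_blk *a, *b;

		a = kzalloc(sizeof(*a), GFP_KERNEL);
		if (!a)
			return;

		b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b) {			/* check the pointer just allocated */
			kfree(a);
			return;
		}

		/* ... use a and b ... */

		kfree(a);
		kfree(b);
	}
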
@@ -1509,10 +1522,6 @@ lba_driver_probe(struct parisc_device *dev)
lba_bus = lba_dev->hba.hba_bus =
pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
cfg_ops, NULL);
- if (lba_bus) {
- lba_next_bus = lba_bus->subordinate + 1;
- pci_bus_add_devices(lba_bus);
- }
/* This is in lieu of calling pci_assign_unassigned_resources() */
if (is_pdc_pat()) {
@@ -1533,7 +1542,6 @@ lba_driver_probe(struct parisc_device *dev)
}
pci_enable_bridges(lba_bus);
-
/*
** Once PCI register ops has walked the bus, access to config
** space is restricted. Avoids master aborts on config cycles.
@@ -1543,6 +1551,11 @@ lba_driver_probe(struct parisc_device *dev)
lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
}
+ if (lba_bus) {
+ lba_next_bus = lba_bus->subordinate + 1;
+ pci_bus_add_devices(lba_bus);
+ }
+
/* Whew! Finally done! Tell services we got this one covered. */
return 0;
}
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index d46dd57450ac..123d8fe3427d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -2057,6 +2057,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
r->start = (base & ~1UL) | PCI_F_EXTEND;
size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
r->end = r->start + size;
+ r->flags = IORESOURCE_MEM;
}
}
@@ -2093,4 +2094,5 @@ void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
r->start += rope * (size + 1); /* adjust base for this rope */
r->end = r->start + size;
+ r->flags = IORESOURCE_MEM;
}
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 33e5ade774ca..675f04e6597a 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -325,7 +325,7 @@ static unsigned int superio_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type superio_interrupt_type = {
+static struct irq_chip superio_interrupt_type = {
.typename = SUPERIO,
.startup = superio_startup_irq,
.shutdown = superio_disable_irq,
@@ -434,8 +434,8 @@ static void __init superio_parport_init(void)
0 /*base_hi*/,
PAR_IRQ,
PARPORT_DMA_NONE /* dma */,
- NULL /*struct pci_dev* */),
- 0 /* shared irq flags */ )
+ NULL /*struct pci_dev* */,
+ 0 /* shared irq flags */))
printk(KERN_WARNING PFX "Probing parallel port failed.\n");
#endif /* CONFIG_PARPORT_PC */
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 151bf5bc8afe..2597145a066e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1471,11 +1471,13 @@ static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
static void __devinit winbond_check(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without key */
outb(0x20, io);
x_devid = inb(io + 1);
@@ -1495,6 +1497,8 @@ static void __devinit winbond_check(int io, int key)
oldid = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
goto out; /* protection against false positives */
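Illustrative sketch (not part of this patch): the winbond/smsc probe fixes above snapshot the index port before writing any magic keys and restore it on the way out, so a probe that happens to land on unrelated hardware leaves it as found. Minimal version with made-up names; 0x20 is an illustrative ID-register index:

	#include <linux/ioport.h>
	#include <linux/io.h>

	/* Sketch only: read a device-ID register behind a Super-I/O index port. */
	static int foo_probe_superio(int io)
	{
		int origval, devid;

		if (!request_region(io, 3, "foo_probe"))
			return -1;

		origval = inb(io);		/* remember whatever lives there */
		outb(0x20, io);			/* select the (assumed) ID register */
		devid = inb(io + 1);
		outb(origval, io);		/* restore before leaving */

		release_region(io, 3);
		return devid;
	}
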
@@ -1505,11 +1509,15 @@ out:
static void __devinit winbond_check2(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval[0] = inb(io); /* Save original values */
+ origval[1] = inb(io + 1);
+ origval[2] = inb(io + 2);
+
/* First probe without the key */
outb(0x20, io + 2);
x_devid = inb(io + 2);
@@ -1528,6 +1536,10 @@ static void __devinit winbond_check2(int io, int key)
oldid = inb(io + 2);
outb(0xaa, io); /* Magic Seal */
+ outb(origval[0], io); /* in case we poked some entirely different hardware */
+ outb(origval[1], io + 1);
+ outb(origval[2], io + 2);
+
if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
goto out; /* protection against false positives */
@@ -1538,11 +1550,13 @@ out:
static void __devinit smsc_check(int io, int key)
{
- int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
+ int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without the key */
outb(0x0d, io);
x_oldid = inb(io + 1);
@@ -1566,6 +1580,8 @@ static void __devinit smsc_check(int io, int key)
rev = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if (x_id == id && x_oldrev == oldrev &&
x_oldid == oldid && x_rev == rev)
goto out; /* protection against false positives */
@@ -1602,11 +1618,12 @@ static void __devinit detect_and_report_smsc(void)
static void __devinit detect_and_report_it87(void)
{
u16 dev;
- u8 r;
+ u8 origval, r;
if (verbose_probing)
printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
- if (!request_region(0x2e, 1, __func__))
+ if (!request_region(0x2e, 2, __func__))
return;
+ origval = inb(0x2e); /* Save original value */
outb(0x87, 0x2e);
outb(0x01, 0x2e);
outb(0x55, 0x2e);
@@ -1626,8 +1643,10 @@ static void __devinit detect_and_report_it87(void)
outb(r | 8, 0x2F);
outb(0x02, 0x2E); /* Lock */
outb(0x02, 0x2F);
+ } else {
+ outb(origval, 0x2e); /* Oops, sorry to disturb */
}
- release_region(0x2e, 1);
+ release_region(0x2e, 2);
}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
@@ -2271,6 +2290,9 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (IS_ERR(pdev))
return NULL;
dev = &pdev->dev;
+
+ dev->coherent_dma_mask = DMA_BIT_MASK(24);
+ dev->dma_mask = &dev->coherent_dma_mask;
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
@@ -2885,6 +2907,7 @@ enum parport_pc_pci_cards {
netmos_9755,
netmos_9805,
netmos_9815,
+ netmos_9901,
quatech_sppxp100,
};
@@ -2965,7 +2988,7 @@ static struct parport_pc_pci {
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
-
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3067,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x2000, 0, 0, netmos_9901 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index b77ae6794275..1ebd6b4c743b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
+obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0f3706512686..db23200c4874 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
+/**
+ * pci_bus_set_ops - Set raw operations of pci bus
+ * @bus: pci bus struct
+ * @ops: new raw operations
+ *
+ * Return previous raw operations
+ */
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
+{
+ struct pci_ops *old_ops;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_lock, flags);
+ old_ops = bus->ops;
+ bus->ops = ops;
+ spin_unlock_irqrestore(&pci_lock, flags);
+ return old_ops;
+}
+EXPORT_SYMBOL(pci_bus_set_ops);
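Illustrative sketch (not part of this patch): the new pci_bus_set_ops() swaps a bus's config accessors under pci_lock and returns the previous ones, so a caller can interpose temporarily and then restore. Hypothetical caller; foo_ops is assumed to be filled in elsewhere:

	#include <linux/pci.h>

	static struct pci_ops foo_ops;		/* .read/.write set up elsewhere (sketch) */

	static void foo_interpose(struct pci_bus *bus)
	{
		struct pci_ops *orig = pci_bus_set_ops(bus, &foo_ops);

		/* ... config cycles now go through foo_ops ... */

		pci_bus_set_ops(bus, orig);	/* put the original accessors back */
	}
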
/**
* pci_read_vpd - Read one entry from Vital Product Data
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 97a8194063b5..cef28a79103f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
void *alignf_data)
{
int i, ret = -ENOMEM;
+ resource_size_t max = -1;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ /* don't allocate too high if the pref mem doesn't support 64bit */
+ if (!(res->flags & IORESOURCE_MEM_64))
+ max = PCIBIOS_MAX_MEM_32;
+
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
if (!r)
@@ -62,7 +67,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
/* Ok, try it out.. */
ret = allocate_resource(r, res, size,
r->start ? : min,
- -1, align,
+ max, align,
alignf, alignf_data);
if (ret == 0)
break;
@@ -201,13 +206,18 @@ void pci_enable_bridges(struct pci_bus *bus)
* Walk the given bus, including any bridged devices
* on buses under this bus. Call the provided callback
* on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ *
*/
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata)
{
struct pci_dev *dev;
struct pci_bus *bus;
struct list_head *next;
+ int retval;
bus = top;
down_read(&pci_bus_sem);
@@ -231,8 +241,10 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
/* Run device routines with the device locked */
down(&dev->dev.sem);
- cb(dev, userdata);
+ retval = cb(dev, userdata);
up(&dev->dev.sem);
+ if (retval)
+ break;
}
up_read(&pci_bus_sem);
}
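Illustrative sketch (not part of this patch): pci_walk_bus() callbacks now return int; zero continues the walk and any non-zero value stops it. A hypothetical search callback:

	#include <linux/pci.h>

	/* Sketch only: stop walking as soon as a matching vendor is seen. */
	static int foo_match_vendor(struct pci_dev *dev, void *data)
	{
		u16 vendor = *(u16 *)data;

		if (dev->vendor == vendor)
			return 1;	/* non-zero: stop the walk */
		return 0;		/* keep walking */
	}

	/* usage: pci_walk_bus(bus, foo_match_vendor, &vendor); */
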
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index fa3a11365ec3..7b287cb38b7a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -267,6 +267,84 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
}
return ret;
}
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ if (!atsru)
+ return -ENOMEM;
+
+ atsru->hdr = hdr;
+ atsru->include_all = atsr->flags & 0x1;
+
+ list_add(&atsru->list, &dmar_atsr_units);
+
+ return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+ int rc;
+ struct acpi_dmar_atsr *atsr;
+
+ if (atsru->include_all)
+ return 0;
+
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ rc = dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+ if (rc || !atsru->devices_cnt) {
+ list_del(&atsru->list);
+ kfree(atsru);
+ }
+
+ return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+ int i;
+ struct pci_bus *bus;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ list_for_each_entry(atsru, &dmar_atsr_units, list) {
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (atsr->segment == pci_domain_nr(dev->bus))
+ goto found;
+ }
+
+ return 0;
+
+found:
+ for (bus = dev->bus; bus; bus = bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (!bridge || !bridge->is_pcie ||
+ bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ for (i = 0; i < atsru->devices_cnt; i++)
+ if (atsru->devices[i] == bridge)
+ return 1;
+ break;
+ }
+ }
+
+ if (atsru->include_all)
+ return 1;
+
+ return 0;
+}
#endif
static void __init
@@ -274,22 +352,28 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_atsr *atsr;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd = (struct acpi_dmar_hardware_unit *)header;
+ drhd = container_of(header, struct acpi_dmar_hardware_unit,
+ header);
printk (KERN_INFO PREFIX
- "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
- drhd->flags, (unsigned long long)drhd->address);
+ "DRHD base: %#016Lx flags: %#x\n",
+ (unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- rmrr = (struct acpi_dmar_reserved_memory *)header;
-
+ rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+ header);
printk (KERN_INFO PREFIX
- "RMRR base: 0x%016Lx end: 0x%016Lx\n",
+ "RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
+ case ACPI_DMAR_TYPE_ATSR:
+ atsr = container_of(header, struct acpi_dmar_atsr, header);
+ printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ break;
}
}
@@ -363,6 +447,11 @@ parse_dmar_table(void)
ret = dmar_parse_one_rmrr(entry_header);
#endif
break;
+ case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+ ret = dmar_parse_one_atsr(entry_header);
+#endif
+ break;
default:
printk(KERN_WARNING PREFIX
"Unknown DMAR structure type\n");
@@ -431,11 +520,19 @@ int __init dmar_dev_scope_init(void)
#ifdef CONFIG_DMAR
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
+ struct dmar_atsr_unit *atsr, *atsr_n;
+
list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
+
+ list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ ret = atsr_parse_dev(atsr);
+ if (ret)
+ return ret;
+ }
}
#endif
@@ -468,6 +565,9 @@ int __init dmar_table_init(void)
#ifdef CONFIG_DMAR
if (list_empty(&dmar_rmrr_units))
printk(KERN_INFO PREFIX "No RMRR found\n");
+
+ if (list_empty(&dmar_atsr_units))
+ printk(KERN_INFO PREFIX "No ATSR found\n");
#endif
#ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
+ int msagaw = 0;
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -535,12 +636,20 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ goto error;
+ }
+ msagaw = iommu_calculate_max_sagaw(iommu);
+ if (msagaw < 0) {
+ printk(KERN_ERR
+ "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto error;
}
#endif
iommu->agaw = agaw;
+ iommu->msagaw = msagaw;
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@ void free_iommu(struct intel_iommu *iommu)
*/
static inline void reclaim_free_desc(struct q_inval *qi)
{
- while (qi->desc_status[qi->free_tail] == QI_DONE) {
+ while (qi->desc_status[qi->free_tail] == QI_DONE ||
+ qi->desc_status[qi->free_tail] == QI_ABORT) {
qi->desc_status[qi->free_tail] = QI_FREE;
qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
qi->free_cnt++;
@@ -600,10 +710,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
u32 fault;
- int head;
+ int head, tail;
struct q_inval *qi = iommu->qi;
int wait_index = (index + 1) % QI_LENGTH;
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+
fault = readl(iommu->reg + DMAR_FSTS_REG);
/*
@@ -613,7 +726,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
*/
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
- if ((head >> 4) == index) {
+ if ((head >> DMAR_IQ_SHIFT) == index) {
+ printk(KERN_ERR "VT-d detected invalid descriptor: "
+ "low=%llx, high=%llx\n",
+ (unsigned long long)qi->desc[index].low,
+ (unsigned long long)qi->desc[index].high);
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
__iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
}
}
+ /*
+ * If ITE happens, all pending wait_desc commands are aborted.
+ * No new descriptors are fetched until the ITE is cleared.
+ */
+ if (fault & DMA_FSTS_ITE) {
+ head = readl(iommu->reg + DMAR_IQH_REG);
+ head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+ head |= 1;
+ tail = readl(iommu->reg + DMAR_IQT_REG);
+ tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+ writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+ do {
+ if (qi->desc_status[head] == QI_IN_USE)
+ qi->desc_status[head] = QI_ABORT;
+ head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+ } while (head != tail);
+
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+ }
+
+ if (fault & DMA_FSTS_ICE)
+ writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
return 0;
}
@@ -632,7 +775,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
*/
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
- int rc = 0;
+ int rc;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;
@@ -643,6 +786,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
hw = qi->desc;
+restart:
+ rc = 0;
+
spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -673,7 +819,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
* update the HW tail register indicating the presence of
* new descriptors.
*/
- writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+ writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
while (qi->desc_status[wait_index] != QI_DONE) {
/*
@@ -685,18 +831,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
*/
rc = qi_check_fault(iommu, index);
if (rc)
- goto out;
+ break;
spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
}
-out:
- qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+ qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);
+ if (rc == -EAGAIN)
+ goto restart;
+
return rc;
}
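Illustrative sketch (not part of this patch): qi_submit_sync() above now restarts from the top when qi_check_fault() reports that the wait descriptor was aborted by an invalidation time-out (-EAGAIN). Stripped of the queue bookkeeping, the control flow is a retry loop like this; foo_submit_once() is a made-up stand-in:

	#include <linux/errno.h>

	/* Stub stand-in for one submission attempt (sketch only). */
	static int foo_submit_once(void)
	{
		return 0;	/* a real implementation may return -EAGAIN */
	}

	static int foo_submit(void)
	{
		int rc;

	restart:
		rc = foo_submit_once();
		if (rc == -EAGAIN)	/* pending work was aborted; queue it again */
			goto restart;
		return rc;
	}
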
@@ -714,41 +863,26 @@ void qi_global_iec(struct intel_iommu *iommu)
qi_submit_sync(&desc, iommu);
}
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type)
{
struct qi_desc desc;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
| QI_CC_GRAN(type) | QI_CC_TYPE;
desc.high = 0;
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
}
-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type)
{
u8 dw = 0, dr = 0;
struct qi_desc desc;
int ih = 0;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
if (cap_write_drain(iommu->cap))
dw = 1;
@@ -760,7 +894,28 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask)
+{
+ struct qi_desc desc;
+
+ if (mask) {
+ BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+ addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ } else
+ desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+ if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+ qdep = 0;
+
+ desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+ QI_DIOTLB_TYPE;
+
+ qi_submit_sync(&desc, iommu);
}
/*
@@ -790,7 +945,6 @@ void dmar_disable_qi(struct intel_iommu *iommu)
cpu_relax();
iommu->gcmd &= ~DMA_GCMD_QIE;
-
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
@@ -804,7 +958,7 @@ end:
*/
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
struct q_inval *qi = iommu->qi;
@@ -818,9 +972,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
- cmd = iommu->gcmd | DMA_GCMD_QIE;
iommu->gcmd |= DMA_GCMD_QIE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
@@ -1096,7 +1249,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
set_irq_data(irq, NULL);
iommu->irq = 0;
destroy_irq(irq);
- return 0;
+ return ret;
}
ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe100a0d..66f29bc00be4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
menuconfig HOTPLUG_PCI
tristate "Support for PCI Hotplug"
- depends on PCI && HOTPLUG
+ depends on PCI && HOTPLUG && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@ config HOTPLUG_PCI_FAKE
config HOTPLUG_PCI_COMPAQ
tristate "Compaq PCI Hotplug driver"
- depends on X86 && PCI_BIOS && PCI_LEGACY
+ depends on X86 && PCI_BIOS
help
Say Y here if you have a motherboard with a Compaq PCI Hotplug
controller.
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index fbc63d5e459f..eb159587d0bf 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -354,7 +354,7 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
status = acpi_run_hpp(handle, hpp);
if (ACPI_SUCCESS(status))
break;
- if (acpi_root_bridge(handle))
+ if (acpi_is_root_bridge(handle))
break;
status = acpi_get_parent(handle, &phandle);
if (ACPI_FAILURE(status))
@@ -428,7 +428,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status))
goto got_one;
- if (acpi_root_bridge(handle))
+ if (acpi_is_root_bridge(handle))
break;
chandle = handle;
status = acpi_get_parent(chandle, &handle);
@@ -449,42 +449,6 @@ got_one:
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
-/* acpi_root_bridge - check to see if this acpi object is a root bridge
- *
- * @handle - the acpi object in question.
- */
-int acpi_root_bridge(acpi_handle handle)
-{
- acpi_status status;
- struct acpi_device_info *info;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- int i;
-
- status = acpi_get_object_info(handle, &buffer);
- if (ACPI_SUCCESS(status)) {
- info = buffer.pointer;
- if ((info->valid & ACPI_VALID_HID) &&
- !strcmp(PCI_ROOT_HID_STRING,
- info->hardware_id.value)) {
- kfree(buffer.pointer);
- return 1;
- }
- if (info->valid & ACPI_VALID_CID) {
- for (i=0; i < info->compatibility_id.count; i++) {
- if (!strcmp(PCI_ROOT_HID_STRING,
- info->compatibility_id.id[i].value)) {
- kfree(buffer.pointer);
- return 1;
- }
- }
- }
- kfree(buffer.pointer);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_root_bridge);
-
-
static int is_ejectable(acpi_handle handle)
{
acpi_status status;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd261b4..4dd7114964ac 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3a6064bce561..0cb0f830a993 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -678,18 +678,9 @@ static void remove_bridge(acpi_handle handle)
static struct pci_dev * get_apic_pci_info(acpi_handle handle)
{
- struct acpi_pci_id id;
- struct pci_bus *bus;
struct pci_dev *dev;
- if (ACPI_FAILURE(acpi_get_pci_id(handle, &id)))
- return NULL;
-
- bus = pci_find_bus(id.segment, id.bus);
- if (!bus)
- return NULL;
-
- dev = pci_get_slot(bus, PCI_DEVFN(id.device, id.function));
+ dev = acpi_get_pci_dev(handle);
if (!dev)
return NULL;
@@ -1396,19 +1387,16 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
/* Program resources in newly inserted bridge */
static int acpiphp_configure_bridge (acpi_handle handle)
{
- struct acpi_pci_id pci_id;
+ struct pci_dev *dev;
struct pci_bus *bus;
- if (ACPI_FAILURE(acpi_get_pci_id(handle, &pci_id))) {
+ dev = acpi_get_pci_dev(handle);
+ if (!dev) {
err("cannot get PCI domain and bus number for bridge\n");
return -EINVAL;
}
- bus = pci_find_bus(pci_id.segment, pci_id.bus);
- if (!bus) {
- err("cannot find bus %d:%d\n",
- pci_id.segment, pci_id.bus);
- return -EINVAL;
- }
+
+ bus = dev->bus;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -1416,6 +1404,7 @@ static int acpiphp_configure_bridge (acpi_handle handle)
acpiphp_set_hpp_values(handle, bus);
pci_enable_bridges(bus);
acpiphp_configure_ioapics(handle);
+ pci_dev_put(dev);
return 0;
}
@@ -1631,7 +1620,7 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *count = (int *)context;
- if (acpi_root_bridge(handle)) {
+ if (acpi_is_root_bridge(handle)) {
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
(*count)++;
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4feef8c..a5b9f6ae507b 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@ static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
static int get_latch_status(struct hotplug_slot *slot, u8 * value);
static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f69f73e..53836001d511 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@ struct ctrl_reg { /* offset */
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
+ SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
MISC = offsetof(struct ctrl_reg, misc),
LED_CONTROL = offsetof(struct ctrl_reg, led_control),
INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
- INT_MASK = offsetof(struct ctrl_reg, int_mask),
- CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
+ INT_MASK = offsetof(struct ctrl_reg, int_mask),
+ CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1),
- GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
- NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
+ GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
+ NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
- SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
- CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
+ SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
+ CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@ struct hrt {
u32 reserved2;
} __attribute__ ((packed));
-/* offsets to the hotplug resource table registers based on the above structure layout */
+/* offsets to the hotplug resource table registers based on the above
+ * structure layout
+ */
enum hrt_offsets {
SIG0 = offsetof(struct hrt, sig0),
SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@ struct slot_rt {
u16 pre_mem_length;
} __attribute__ ((packed));
-/* offsets to the hotplug slot resource table registers based on the above structure layout */
+/* offsets to the hotplug slot resource table registers based on the above
+ * structure layout
+ */
enum slot_rt_offsets {
DEV_FUNC = offsetof(struct slot_rt, dev_func),
- PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
- SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
- MAX_BUS = offsetof(struct slot_rt, max_bus),
- IO_BASE = offsetof(struct slot_rt, io_base),
- IO_LENGTH = offsetof(struct slot_rt, io_length),
- MEM_BASE = offsetof(struct slot_rt, mem_base),
- MEM_LENGTH = offsetof(struct slot_rt, mem_length),
- PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
- PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
+ PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
+ SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
+ MAX_BUS = offsetof(struct slot_rt, max_bus),
+ IO_BASE = offsetof(struct slot_rt, io_base),
+ IO_LENGTH = offsetof(struct slot_rt, io_length),
+ MEM_BASE = offsetof(struct slot_rt, mem_base),
+ MEM_LENGTH = offsetof(struct slot_rt, mem_length),
+ PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
+ PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
};
struct pci_func {
@@ -286,8 +290,8 @@ struct event_info {
struct controller {
struct controller *next;
u32 ctrl_int_comp;
- struct mutex crit_sect; /* critical section mutex */
- void __iomem *hpc_reg; /* cookie for our pci controller location */
+ struct mutex crit_sect; /* critical section mutex */
+ void __iomem *hpc_reg; /* cookie for our pci controller location */
struct pci_resource *mem_head;
struct pci_resource *p_mem_head;
struct pci_resource *io_head;
@@ -299,7 +303,7 @@ struct controller {
u8 next_event;
u8 interrupt;
u8 cfgspc_irq;
- u8 bus; /* bus number for the pci hotplug controller */
+ u8 bus; /* bus number for the pci hotplug controller */
u8 rev;
u8 slot_device_offset;
u8 first_slot;
@@ -401,46 +405,57 @@ struct resource_lists {
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs (void);
-extern void cpqhp_shutdown_debugfs (void);
-extern void cpqhp_create_debugfs_files (struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files (struct controller *ctrl);
+extern void cpqhp_initialize_debugfs(void);
+extern void cpqhp_shutdown_debugfs(void);
+extern void cpqhp_create_debugfs_files(struct controller *ctrl);
+extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread (unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data);
-extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start);
-extern int cpqhp_event_start_thread (void);
-extern void cpqhp_event_stop_thread (void);
-extern struct pci_func *cpqhp_slot_create (unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index);
-extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test (struct controller *ctrl, int test_num);
+extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
+extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+extern int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+extern int cpqhp_event_start_thread(void);
+extern void cpqhp_event_stop_thread(void);
+extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot);
-extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug);
-extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot);
-extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func);
-extern void cpqhp_destroy_board_resources (struct pci_func * func);
-extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources);
-extern void cpqhp_destroy_resource_list (struct resource_lists * resources);
-extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func);
-extern int cpqhp_unconfigure_device (struct pci_func* func);
+extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
+ int is_hot_plug);
+extern int cpqhp_save_base_addr_length(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_used_resources(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_configure_board(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_slot_config(struct controller *ctrl,
+ struct pci_func *new_slot);
+extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+extern void cpqhp_destroy_board_resources(struct pci_func *func);
+extern int cpqhp_return_board_resources(struct pci_func *func,
+ struct resource_lists *resources);
+extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
+extern int cpqhp_configure_device(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
extern int cpqhp_legacy_mode;
extern struct controller *cpqhp_ctrl_list;
extern struct pci_func *cpqhp_slot_list[256];
+extern struct irq_routing_table *cpqhp_routing_table;
/* these can be gotten rid of, but for debugging they are purty */
extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@ extern u8 cpqhp_disk_irq;
/* inline functions */
-static inline char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
}
@@ -458,9 +473,9 @@ static inline char *slot_name(struct slot *slot)
* return_resource
*
* Puts node back in the resource list pointed to by head
- *
*/
-static inline void return_resource(struct pci_resource **head, struct pci_resource *node)
+static inline void return_resource(struct pci_resource **head,
+ struct pci_resource *node)
{
if (!node || !head)
return;
@@ -471,7 +486,7 @@ static inline void return_resource(struct pci_resource **head, struct pci_resour
static inline void set_SOGO(struct controller *ctrl)
{
u16 misc;
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc = (misc | 0x0001) & 0xFFFB;
writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@ static inline void set_SOGO(struct controller *ctrl)
static inline void amber_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= (0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@ static inline void amber_LED_on(struct controller *ctrl, u8 slot)
static inline void amber_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= (0x01010000L << slot);
-
+
return led_control ? 1 : 0;
}
@@ -512,7 +527,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
static inline void green_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= 0x0101L << slot;
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@ static inline void green_LED_on(struct controller *ctrl, u8 slot)
static inline void green_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@ static inline void green_LED_off(struct controller *ctrl, u8 slot)
static inline void green_LED_blink(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@ static inline u8 read_slot_enable(struct controller *ctrl)
}
-/*
+/**
* get_controller_speed - find the current frequency/mode of controller.
*
* @ctrl: controller to get frequency/mode for.
*
* Returns controller speed.
- *
*/
static inline u8 get_controller_speed(struct controller *ctrl)
{
u8 curr_freq;
- u16 misc;
-
+ u16 misc;
+
if (ctrl->pcix_support) {
curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
- if ((curr_freq & 0xB0) == 0xB0)
+ if ((curr_freq & 0xB0) == 0xB0)
return PCI_SPEED_133MHz_PCIX;
if ((curr_freq & 0xA0) == 0xA0)
return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@ static inline u8 get_controller_speed(struct controller *ctrl)
return PCI_SPEED_33MHz;
}
- misc = readw(ctrl->hpc_reg + MISC);
- return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
+ misc = readw(ctrl->hpc_reg + MISC);
+ return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
}
-
-/*
+
+/**
* get_adapter_speed - find the max supported frequency/mode of adapter.
*
* @ctrl: hotplug controller.
* @hp_slot: hotplug slot where adapter is installed.
*
* Returns adapter speed.
- *
*/
static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
{
@@ -672,7 +685,8 @@ static inline int get_slot_enabled(struct controller *ctrl, struct slot *slot)
}
-static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot)
+static inline int cpq_get_latch_status(struct controller *ctrl,
+ struct slot *slot)
{
u32 status;
u8 hp_slot;
@@ -687,7 +701,8 @@ static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slo
}
-static inline int get_presence_status(struct controller *ctrl, struct slot *slot)
+static inline int get_presence_status(struct controller *ctrl,
+ struct slot *slot)
{
int presence_save = 0;
u8 hp_slot;
@@ -696,7 +711,8 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
hp_slot = slot->device - ctrl->slot_device_offset;
tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
- presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02;
+ presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
+ >> hp_slot) & 0x02;
return presence_save;
}
@@ -718,5 +734,12 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
return retval;
}
-#endif
+#include <asm/pci_x86.h>
+static inline int cpqhp_routing_table_length(void)
+{
+ BUG_ON(cpqhp_routing_table == NULL);
+ return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
+ sizeof(struct irq_info));
+}
+#endif
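Illustrative sketch (not part of this patch): with the BIOS routing table cached in cpqhp_routing_table, callers size their loops with the new cpqhp_routing_table_length() helper. A hypothetical consumer, assuming cpqphp.h is included and init_cpqhp_routing_table() has already run:

	/* Sketch only: walk the cached $PIR routing table. */
	static void foo_dump_routes(void)
	{
		int i, len = cpqhp_routing_table_length();

		for (i = 0; i < len; i++)
			dbg("bus %d devfn %#x slot %d\n",
			    cpqhp_routing_table->slots[i].bus,
			    cpqhp_routing_table->slots[i].devfn,
			    cpqhp_routing_table->slots[i].slot);
	}
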
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcbb28a7..075b4f4b6e0d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
* Send feedback to <greg@kroah.com>
*
* Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
- * Torben Mathiasen <torben.mathiasen@hp.com>
- *
+ * Torben Mathiasen <torben.mathiasen@hp.com>
*/
#include <linux/module.h>
@@ -45,7 +44,6 @@
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
/* Global variables */
@@ -53,6 +51,7 @@ int cpqhp_debug;
int cpqhp_legacy_mode;
struct controller *cpqhp_ctrl_list; /* = NULL */
struct pci_func *cpqhp_slot_list[256];
+struct irq_routing_table *cpqhp_routing_table;
/* local variables */
static void __iomem *smbios_table;
@@ -78,33 +77,6 @@ MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
#define CPQHPC_MODULE_MINOR 208
-static int one_time_init (void);
-static int set_attention_status (struct hotplug_slot *slot, u8 value);
-static int process_SI (struct hotplug_slot *slot);
-static int process_SS (struct hotplug_slot *slot);
-static int hardware_test (struct hotplug_slot *slot, u32 value);
-static int get_power_status (struct hotplug_slot *slot, u8 *value);
-static int get_attention_status (struct hotplug_slot *slot, u8 *value);
-static int get_latch_status (struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .set_attention_status = set_attention_status,
- .enable_slot = process_SI,
- .disable_slot = process_SS,
- .hardware_test = hardware_test,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
-};
-
-
static inline int is_slot64bit(struct slot *slot)
{
return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
break;
}
}
-
+
if (!status)
fp = NULL;
@@ -171,7 +143,7 @@ static int init_SERR(struct controller * ctrl)
tempdword = ctrl->first_slot;
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // Loop through slots
+ /* Loop through slots */
while (number_of_slots) {
physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@ static int init_SERR(struct controller * ctrl)
return 0;
}
-
-/* nice debugging output */
-static int pci_print_IRQ_route (void)
+static int init_cpqhp_routing_table(void)
{
- struct irq_routing_table *routing_table;
int len;
- int loop;
-
- u8 tbus, tdevice, tslot;
- routing_table = pcibios_get_irq_routing_table();
- if (routing_table == NULL) {
- err("No BIOS Routing Table??? Not good\n");
+ cpqhp_routing_table = pcibios_get_irq_routing_table();
+ if (cpqhp_routing_table == NULL)
return -ENOMEM;
- }
- len = (routing_table->size - sizeof(struct irq_routing_table)) /
- sizeof(struct irq_info);
- // Make sure I got at least one entry
+ len = cpqhp_routing_table_length();
if (len == 0) {
- kfree(routing_table);
+ kfree(cpqhp_routing_table);
+ cpqhp_routing_table = NULL;
return -1;
}
- dbg("bus dev func slot\n");
+ return 0;
+}
+
+/* nice debugging output */
+static void pci_print_IRQ_route(void)
+{
+ int len;
+ int loop;
+ u8 tbus, tdevice, tslot;
+
+ len = cpqhp_routing_table_length();
+ dbg("bus dev func slot\n");
for (loop = 0; loop < len; ++loop) {
- tbus = routing_table->slots[loop].bus;
- tdevice = routing_table->slots[loop].devfn;
- tslot = routing_table->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
}
- kfree(routing_table);
- return 0;
+ return;
}
@@ -242,9 +215,9 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
void __iomem *p_max;
if (!smbios_table || !curr)
- return(NULL);
+ return NULL;
- // set p_max to the end of the table
+ /* set p_max to the end of the table */
p_max = smbios_start + readw(smbios_table + ST_LENGTH);
p_temp = curr;
@@ -253,20 +226,19 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
while ((p_temp < p_max) && !bail) {
/* Look for the double NULL terminator
* The first condition is the previous byte
- * and the second is the curr */
- if (!previous_byte && !(readb(p_temp))) {
+ * and the second is the curr
+ */
+ if (!previous_byte && !(readb(p_temp)))
bail = 1;
- }
previous_byte = readb(p_temp);
p_temp++;
}
- if (p_temp < p_max) {
+ if (p_temp < p_max)
return p_temp;
- } else {
+ else
return NULL;
- }
}
@@ -292,21 +264,18 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
if (!smbios_table)
return NULL;
- if (!previous) {
+ if (!previous)
previous = smbios_start;
- } else {
+ else
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- }
- while (previous) {
- if (readb(previous + SMBIOS_GENERIC_TYPE) != type) {
+ while (previous)
+ if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- } else {
+ else
break;
- }
- }
return previous;
}
@@ -322,144 +291,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot);
}
-#define SLOT_NAME_SIZE 10
-
-static int ctrl_slot_setup(struct controller *ctrl,
- void __iomem *smbios_start,
- void __iomem *smbios_table)
-{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
- u8 number_of_slots;
- u8 slot_device;
- u8 slot_number;
- u8 ctrl_slot;
- u32 tempdword;
- char name[SLOT_NAME_SIZE];
- void __iomem *slot_entry= NULL;
- int result = -ENOMEM;
-
- dbg("%s\n", __func__);
-
- tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
-
- number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
- slot_number = ctrl->first_slot;
-
- while (number_of_slots) {
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info =
- kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info)
- goto error_hpslot;
- hotplug_slot_info = hotplug_slot->info;
-
- slot->ctrl = ctrl;
- slot->bus = ctrl->bus;
- slot->device = slot_device;
- slot->number = slot_number;
- dbg("slot->number = %u\n", slot->number);
-
- slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
- slot_entry);
-
- while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
- slot->number)) {
- slot_entry = get_SMBIOS_entry(smbios_start,
- smbios_table, 9, slot_entry);
- }
-
- slot->p_sm_slot = slot_entry;
-
- init_timer(&slot->task_event);
- slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
-
- //FIXME: these capabilities aren't used but if they are
- // they need to be correctly implemented
- slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
- slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
-
- if (is_slot64bit(slot))
- slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
- if (is_slot66mhz(slot))
- slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
- if (ctrl->speed == PCI_SPEED_66MHz)
- slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
-
- ctrl_slot =
- slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
-
- // Check presence
- slot->capabilities |=
- ((((~tempdword) >> 23) |
- ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
- // Check the switch state
- slot->capabilities |=
- ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
- // Check the slot enable
- slot->capabilities |=
- ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
-
- /* register this slot with the hotplug pci core */
- hotplug_slot->release = &release_slot;
- hotplug_slot->private = slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
-
- dbg("registering bus %d, dev %d, number %d, "
- "ctrl->slot_device_offset %d, slot %d\n",
- slot->bus, slot->device,
- slot->number, ctrl->slot_device_offset,
- slot_number);
- result = pci_hp_register(hotplug_slot,
- ctrl->pci_dev->bus,
- slot->device,
- name);
- if (result) {
- err("pci_hp_register failed with error %d\n", result);
- goto error_info;
- }
-
- slot->next = ctrl->slot;
- ctrl->slot = slot;
-
- number_of_slots--;
- slot_device++;
- slot_number++;
- }
-
- return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
-error_slot:
- kfree(slot);
-error:
- return result;
-}
-
static int ctrl_slot_cleanup (struct controller * ctrl)
{
struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@ static int ctrl_slot_cleanup (struct controller * ctrl)
cpqhp_remove_debugfs_files(ctrl);
- //Free IRQ associated with hot plug device
+ /* Free IRQ associated with hot plug device */
free_irq(ctrl->interrupt, ctrl);
- //Unmap the memory
+ /* Unmap the memory */
iounmap(ctrl->hpc_reg);
- //Finally reclaim PCI mem
+ /* Finally reclaim PCI mem */
release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
pci_resource_len(ctrl->pci_dev, 0));
- return(0);
+ return 0;
}
-//============================================================================
-// function: get_slot_mapping
-//
-// Description: Attempts to determine a logical slot mapping for a PCI
-// device. Won't work for more than one PCI-PCI bridge
-// in a slot.
-//
-// Input: u8 bus_num - bus number of PCI device
-// u8 dev_num - device number of PCI device
-// u8 *slot - Pointer to u8 where slot number will
-// be returned
-//
-// Output: SUCCESS or FAILURE
-//=============================================================================
+/**
+ * get_slot_mapping - determine logical slot mapping for PCI device
+ * @bus: PCI bus used for the config space accesses
+ * @bus_num: bus number of PCI device
+ * @dev_num: device number of PCI device
+ * @slot: pointer to u8 where slot number will be returned
+ *
+ * Won't work for more than one PCI-PCI bridge in a slot.
+ *
+ * Returns 0 if a slot mapping was found, -1 otherwise.
+ */
static int
get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
u32 work;
long len;
long loop;
@@ -516,36 +343,25 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
bridgeSlot = 0xFF;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength);
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if ((tbus == bus_num) && (tdevice == dev_num)) {
*slot = tslot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
} else {
/* Did not get a match on the target PCI device. Check
- * if the current IRQ table entry is a PCI-to-PCI bridge
- * device. If so, and it's secondary bus matches the
- * bus number for the target device, I need to save the
- * bridge's slot number. If I can not find an entry for
- * the target device, I will have to assume it's on the
- * other side of the bridge, and assign it the bridge's
- * slot. */
+ * if the current IRQ table entry is a PCI-to-PCI
+ * bridge device. If so, and its secondary bus
+ * matches the bus number for the target device, I need
+ * to save the bridge's slot number. If I can not find
+ * an entry for the target device, I will have to
+ * assume it's on the other side of the bridge, and
+ * assign it the bridge's slot.
+ */
bus->number = tbus;
pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
PCI_DEVFN(tdevice, 0),
PCI_PRIMARY_BUS, &work);
- // See if bridge's secondary bus matches target bus.
+ /* See if bridge's secondary bus matches target bus. */
- if (((work >> 8) & 0x000000FF) == (long) bus_num) {
+ if (((work >> 8) & 0x000000FF) == (long) bus_num)
bridgeSlot = tslot;
- }
}
}
}
- // If we got here, we didn't find an entry in the IRQ mapping table
- // for the target PCI device. If we did determine that the target
- // device is on the other side of a PCI-to-PCI bridge, return the
- // slot number for the bridge.
+ /* If we got here, we didn't find an entry in the IRQ mapping table for
+ * the target PCI device. If we did determine that the target device
+ * is on the other side of a PCI-to-PCI bridge, return the slot number
+ * for the bridge.
+ */
if (bridgeSlot != 0xFF) {
*slot = bridgeSlot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
}
- kfree(PCIIRQRoutingInfoLength);
- // Couldn't find an entry in the routing table for this PCI device
+ /* Couldn't find an entry in the routing table for this PCI device */
return -1;
}
@@ -591,32 +405,32 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
u8 hp_slot;
if (func == NULL)
- return(1);
+ return 1;
hp_slot = func->device - ctrl->slot_device_offset;
- // Wait for exclusive access to hardware
+ /* Wait for exclusive access to hardware */
mutex_lock(&ctrl->crit_sect);
- if (status == 1) {
+ if (status == 1)
amber_LED_on (ctrl, hp_slot);
- } else if (status == 0) {
+ else if (status == 0)
amber_LED_off (ctrl, hp_slot);
- } else {
- // Done with exclusive hardware access
+ else {
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(1);
+ return 1;
}
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(0);
+ return 0;
}
@@ -719,7 +533,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
- return cpqhp_hardware_test(ctrl, value);
+ return cpqhp_hardware_test(ctrl, value);
}
@@ -738,7 +552,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
-
+
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
return 0;
}
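+/* hotplug_slot_ops handed to the PCI hotplug core for each slot registered below */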
+static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = process_SI,
+ .disable_slot = process_SS,
+ .hardware_test = hardware_test,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+ .get_max_bus_speed = get_max_bus_speed,
+ .get_cur_bus_speed = get_cur_bus_speed,
+};
+
+#define SLOT_NAME_SIZE 10
+
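+/* walk the controller's slot mask and register a hotplug slot for each physical slot */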
+static int ctrl_slot_setup(struct controller *ctrl,
+ void __iomem *smbios_start,
+ void __iomem *smbios_table)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
+ u8 number_of_slots;
+ u8 slot_device;
+ u8 slot_number;
+ u8 ctrl_slot;
+ u32 tempdword;
+ char name[SLOT_NAME_SIZE];
+ void __iomem *slot_entry = NULL;
+ int result = -ENOMEM;
+
+ dbg("%s\n", __func__);
+
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+ slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+ slot_number = ctrl->first_slot;
+
+ while (number_of_slots) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
+ GFP_KERNEL);
+ if (!slot->hotplug_slot)
+ goto error_slot;
+ hotplug_slot = slot->hotplug_slot;
+
+ hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
+ GFP_KERNEL);
+ if (!hotplug_slot->info)
+ goto error_hpslot;
+ hotplug_slot_info = hotplug_slot->info;
+
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->bus;
+ slot->device = slot_device;
+ slot->number = slot_number;
+ dbg("slot->number = %u\n", slot->number);
+
+ slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
+ slot_entry);
+
+ while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
+ slot->number)) {
+ slot_entry = get_SMBIOS_entry(smbios_start,
+ smbios_table, 9, slot_entry);
+ }
+
+ slot->p_sm_slot = slot_entry;
+
+ init_timer(&slot->task_event);
+ slot->task_event.expires = jiffies + 5 * HZ;
+ slot->task_event.function = cpqhp_pushbutton_thread;
+
+ /* FIXME: these capabilities aren't used but if they are
+ * they need to be correctly implemented
+ */
+ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
+ slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
+
+ if (is_slot64bit(slot))
+ slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
+ if (is_slot66mhz(slot))
+ slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
+ if (ctrl->speed == PCI_SPEED_66MHz)
+ slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
+
+ ctrl_slot =
+ slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
+
+ /* Check presence */
+ slot->capabilities |=
+ ((((~tempdword) >> 23) |
+ ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
+ /* Check the switch state */
+ slot->capabilities |=
+ ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
+ /* Check the slot enable */
+ slot->capabilities |=
+ ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
+
+ /* register this slot with the hotplug pci core */
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->private = slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+ hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
+
+ hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
+ hotplug_slot_info->attention_status =
+ cpq_get_attention_status(ctrl, slot);
+ hotplug_slot_info->latch_status =
+ cpq_get_latch_status(ctrl, slot);
+ hotplug_slot_info->adapter_status =
+ get_presence_status(ctrl, slot);
+
+ dbg("registering bus %d, dev %d, number %d, "
+ "ctrl->slot_device_offset %d, slot %d\n",
+ slot->bus, slot->device,
+ slot->number, ctrl->slot_device_offset,
+ slot_number);
+ result = pci_hp_register(hotplug_slot,
+ ctrl->pci_dev->bus,
+ slot->device,
+ name);
+ if (result) {
+ err("pci_hp_register failed with error %d\n", result);
+ goto error_info;
+ }
+
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
+
+ number_of_slots--;
+ slot_device++;
+ slot_number++;
+ }
+
+ return 0;
+error_info:
+ kfree(hotplug_slot_info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return result;
+}
+
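+/* one-time driver setup: IRQ routing table, event thread, ROM and SMBIOS mappings */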
+static int one_time_init(void)
+{
+ int loop;
+ int retval = 0;
+
+ if (initialized)
+ return 0;
+
+ power_mode = 0;
+
+ retval = init_cpqhp_routing_table();
+ if (retval)
+ goto error;
+
+ if (cpqhp_debug)
+ pci_print_IRQ_route();
+
+ dbg("Initialize + Start the notification mechanism \n");
+
+ retval = cpqhp_event_start_thread();
+ if (retval)
+ goto error;
+
+ dbg("Initialize slot lists\n");
+ for (loop = 0; loop < 256; loop++)
+ cpqhp_slot_list[loop] = NULL;
+
+ /* FIXME: We also need to hook the NMI handler eventually.
+ * this also needs to be worked with Christoph
+ * register_NMI_handler();
+ */
+ /* Map rom address */
+ cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
+ if (!cpqhp_rom_start) {
+ err ("Could not ioremap memory region for ROM\n");
+ retval = -EIO;
+ goto error;
+ }
+
+ /* Now, map the int15 entry point if we are on Compaq-specific
+ * hardware
+ */
+ compaq_nvram_init(cpqhp_rom_start);
+
+ /* Map smbios table entry point structure */
+ smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
+ cpqhp_rom_start + ROM_PHY_LEN);
+ if (!smbios_table) {
+ err ("Could not find the SMBIOS pointer in memory\n");
+ retval = -EIO;
+ goto error_rom_start;
+ }
+
+ smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
+ readw(smbios_table + ST_LENGTH));
+ if (!smbios_start) {
+ err ("Could not ioremap memory region taken from SMBIOS values\n");
+ retval = -EIO;
+ goto error_smbios_start;
+ }
+
+ initialized = 1;
+
+ return retval;
+
+error_smbios_start:
+ iounmap(smbios_start);
+error_rom_start:
+ iounmap(cpqhp_rom_start);
+error:
+ return retval;
+}
+
static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 num_of_slots = 0;
@@ -815,7 +853,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
- // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery
+ /* Need to read VID early b/c it's used to differentiate CPQ and INTC
+ * discovery
+ */
rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
- /* Check for the proper subsytem ID's
+ /* Check for the proper subsystem IDs.
- * Intel uses a different SSID programming model than Compaq.
+ * Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
*/
- if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) {
- // TODO: This code can be made to support non-Compaq or Intel subsystem IDs
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
- dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
- if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
- err(msg_HPC_non_compaq_or_intel);
- rc = -ENODEV;
- goto err_disable_device;
- }
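+ /* non-Intel controllers with a revision of 2 or less are not supported */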
+ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_not_supported);
+ return -ENODEV;
+ }
- ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
- if (!ctrl) {
- err("%s : out of memory\n", __func__);
- rc = -ENOMEM;
- goto err_disable_device;
- }
+ /* TODO: This code can be made to support non-Compaq or Intel
+ * subsystem IDs
+ */
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_disable_device;
+ }
+ dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
+ if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_non_compaq_or_intel);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
+ ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
+ if (!ctrl) {
+ err("%s : out of memory\n", __func__);
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
- info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
-
- /* Set Vendor ID, so it can be accessed later from other functions */
- ctrl->vendor_id = vendor_id;
-
- switch (subsystem_vid) {
- case PCI_VENDOR_ID_COMPAQ:
- if (pdev->revision >= 0x13) { /* CIOBX */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 1;
- pci_read_config_byte(pdev, 0x41, &bus_cap);
- if (bus_cap & 0x80) {
- dbg("bus max supports 133MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
- break;
- }
- if (bus_cap & 0x40) {
- dbg("bus max supports 100MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- break;
- }
- if (bus_cap & 20) {
- dbg("bus max supports 66MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
- break;
- }
- if (bus_cap & 10) {
- dbg("bus max supports 66MHz PCI\n");
- ctrl->speed_capability = PCI_SPEED_66MHz;
- break;
- }
-
- break;
- }
-
- switch (subsystem_deviceid) {
- case PCI_SUB_HPC_ID:
- /* Original 6500/7000 implementation */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID2:
- /* First Pushbutton implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID_INTC:
- /* Third party (6500/7000) */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID3:
- /* First 66 Mhz implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_66MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID4:
- /* First PCI-X implementation, 100MHz */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 0;
- break;
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
- }
- break;
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_free_ctrl;
+ }
- case PCI_VENDOR_ID_INTEL:
- /* Check for speed capability (0=33, 1=66) */
- if (subsystem_deviceid & 0x0001) {
- ctrl->speed_capability = PCI_SPEED_66MHz;
- } else {
- ctrl->speed_capability = PCI_SPEED_33MHz;
- }
-
- /* Check for push button */
- if (subsystem_deviceid & 0x0002) {
- /* no push button */
- ctrl->push_button = 0;
- } else {
- /* push button supported */
- ctrl->push_button = 1;
- }
-
- /* Check for slot switch type (0=mechanical, 1=not mechanical) */
- if (subsystem_deviceid & 0x0004) {
- /* no switch */
- ctrl->slot_switch_type = 0;
- } else {
- /* switch */
- ctrl->slot_switch_type = 1;
- }
-
- /* PHP Status (0=De-feature PHP, 1=Normal operation) */
- if (subsystem_deviceid & 0x0008) {
- ctrl->defeature_PHP = 1; // PHP supported
- } else {
- ctrl->defeature_PHP = 0; // PHP not supported
- }
-
- /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0010) {
- ctrl->alternate_base_address = 1; // supported
- } else {
- ctrl->alternate_base_address = 0; // not supported
- }
-
- /* PCI Config Space Index (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0020) {
- ctrl->pci_config_space = 1; // supported
- } else {
- ctrl->pci_config_space = 0; // not supported
- }
-
- /* PCI-X support */
- if (subsystem_deviceid & 0x0080) {
- /* PCI-X capable */
- ctrl->pcix_support = 1;
- /* Frequency of operation in PCI-X mode */
- if (subsystem_deviceid & 0x0040) {
- /* 133MHz PCI-X if bit 7 is 1 */
- ctrl->pcix_speed_capability = 1;
- } else {
- /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
- /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
- ctrl->pcix_speed_capability = 0;
- }
- } else {
- /* Conventional PCI */
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- }
+ info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
+
+ /* Set Vendor ID, so it can be accessed later from other
+ * functions
+ */
+ ctrl->vendor_id = vendor_id;
+
+ switch (subsystem_vid) {
+ case PCI_VENDOR_ID_COMPAQ:
+ if (pdev->revision >= 0x13) { /* CIOBX */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 1;
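+ /* config offset 0x41 reports the maximum speed/mode the bus supports */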
+ pci_read_config_byte(pdev, 0x41, &bus_cap);
+ if (bus_cap & 0x80) {
+ dbg("bus max supports 133MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
break;
+ }
+ if (bus_cap & 0x40) {
+ dbg("bus max supports 100MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x20) {
+ dbg("bus max supports 66MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x10) {
+ dbg("bus max supports 66MHz PCI\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ break;
+ }
+
+ break;
+ }
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
+ switch (subsystem_deviceid) {
+ case PCI_SUB_HPC_ID:
+ /* Original 6500/7000 implementation */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID2:
+ /* First Pushbutton implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID_INTC:
+ /* Third party (6500/7000) */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID3:
+ /* First 66 Mhz implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID4:
+ /* First PCI-X implementation, 100MHz */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
}
+ break;
+
+ case PCI_VENDOR_ID_INTEL:
+ /* Check for speed capability (0=33, 1=66) */
+ if (subsystem_deviceid & 0x0001)
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ else
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+
+ /* Check for push button */
+ if (subsystem_deviceid & 0x0002)
+ ctrl->push_button = 0;
+ else
+ ctrl->push_button = 1;
+
+ /* Check for slot switch type (0=mechanical, 1=not mechanical) */
+ if (subsystem_deviceid & 0x0004)
+ ctrl->slot_switch_type = 0;
+ else
+ ctrl->slot_switch_type = 1;
+
+ /* PHP Status (0=De-feature PHP, 1=Normal operation) */
+ if (subsystem_deviceid & 0x0008)
+ ctrl->defeature_PHP = 1; /* PHP supported */
+ else
+ ctrl->defeature_PHP = 0; /* PHP not supported */
+
+ /* Alternate Base Address Register Interface
+ * (0=not supported, 1=supported)
+ */
+ if (subsystem_deviceid & 0x0010)
+ ctrl->alternate_base_address = 1;
+ else
+ ctrl->alternate_base_address = 0;
+
+ /* PCI Config Space Index (0=not supported, 1=supported) */
+ if (subsystem_deviceid & 0x0020)
+ ctrl->pci_config_space = 1;
+ else
+ ctrl->pci_config_space = 0;
+
+ /* PCI-X support */
+ if (subsystem_deviceid & 0x0080) {
+ ctrl->pcix_support = 1;
+ if (subsystem_deviceid & 0x0040)
+ /* 133MHz PCI-X if bit 7 is 1 */
+ ctrl->pcix_speed_capability = 1;
+ else
+ /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
+ /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
+ ctrl->pcix_speed_capability = 0;
+ } else {
+ /* Conventional PCI */
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ }
+ break;
- } else {
+ default:
err(msg_HPC_not_supported);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_free_ctrl;
}
- // Tell the user that we found one.
+ /* Tell the user that we found one. */
info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
pdev->bus->number);
@@ -1087,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
goto err_free_bus;
}
-
+
dbg("pdev = %p\n", pdev);
dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_mem_region;
}
- // Check for 66Mhz operation
+ /* Check for 66Mhz operation */
ctrl->speed = get_controller_speed(ctrl);
@@ -1120,7 +1152,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*
********************************************************/
- // find the physical slot number of the first hot plug slot
+ /* find the physical slot number of the first hot plug slot */
/* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_iounmap;
}
- // Store PCI Config Space for all devices on this bus
+ /* Store PCI Config Space for all devices on this bus */
rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
if (rc) {
err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*
* Get IO, memory, and IRQ resources for new devices
*/
- // The next line is required for cpqhp_find_available_resources
+ /* The next line is required for cpqhp_find_available_resources */
ctrl->interrupt = pdev->irq;
if (ctrl->interrupt < 0x10) {
cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
__func__, rc);
goto err_iounmap;
}
-
+
/* Mask all general input interrupts */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
@@ -1196,12 +1228,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_iounmap;
}
- /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */
+ /* Enable Shift Out interrupt and clear it, also enable SERR on power
+ * fault
+ */
temp_word = readw(ctrl->hpc_reg + MISC);
temp_word |= 0x4006;
writew(temp_word, ctrl->hpc_reg + MISC);
- // Changed 05/05/97 to clear all interrupts at start
+ /* Changed 05/05/97 to clear all interrupts at start */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cpqhp_ctrl_list = ctrl;
}
- // turn off empty slots here unless command line option "ON" set
- // Wait for exclusive access to hardware
+ /* turn off empty slots here unless command line option "ON" set
+ * Wait for exclusive access to hardware
+ */
mutex_lock(&ctrl->crit_sect);
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // find first device number for the ctrl
+ /* find first device number for the ctrl */
device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
while (num_of_slots) {
@@ -1234,23 +1269,21 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hp_slot = func->device - ctrl->slot_device_offset;
dbg("hp_slot: %d\n", hp_slot);
- // We have to save the presence info for these slots
+ /* We have to save the presence info for these slots */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
- if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
func->switch_save = 0;
- } else {
+ else
func->switch_save = 0x10;
- }
- if (!power_mode) {
+ if (!power_mode)
if (!func->is_a_board) {
green_LED_off(ctrl, hp_slot);
slot_disable(ctrl, hp_slot);
}
- }
device++;
num_of_slots--;
@@ -1258,7 +1291,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!power_mode) {
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq(ctrl);
}
@@ -1269,7 +1302,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_irq;
}
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@ err_disable_device:
return rc;
}
-
-static int one_time_init(void)
-{
- int loop;
- int retval = 0;
-
- if (initialized)
- return 0;
-
- power_mode = 0;
-
- retval = pci_print_IRQ_route();
- if (retval)
- goto error;
-
- dbg("Initialize + Start the notification mechanism \n");
-
- retval = cpqhp_event_start_thread();
- if (retval)
- goto error;
-
- dbg("Initialize slot lists\n");
- for (loop = 0; loop < 256; loop++) {
- cpqhp_slot_list[loop] = NULL;
- }
-
- // FIXME: We also need to hook the NMI handler eventually.
- // this also needs to be worked with Christoph
- // register_NMI_handler();
-
- // Map rom address
- cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
- if (!cpqhp_rom_start) {
- err ("Could not ioremap memory region for ROM\n");
- retval = -EIO;
- goto error;
- }
-
- /* Now, map the int15 entry point if we are on compaq specific hardware */
- compaq_nvram_init(cpqhp_rom_start);
-
- /* Map smbios table entry point structure */
- smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
- cpqhp_rom_start + ROM_PHY_LEN);
- if (!smbios_table) {
- err ("Could not find the SMBIOS pointer in memory\n");
- retval = -EIO;
- goto error_rom_start;
- }
-
- smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
- readw(smbios_table + ST_LENGTH));
- if (!smbios_start) {
- err ("Could not ioremap memory region taken from SMBIOS values\n");
- retval = -EIO;
- goto error_smbios_start;
- }
-
- initialized = 1;
-
- return retval;
-
-error_smbios_start:
- iounmap(smbios_start);
-error_rom_start:
- iounmap(cpqhp_rom_start);
-error:
- return retval;
-}
-
-
static void __exit unload_cpqphpd(void)
{
struct pci_func *next;
@@ -1381,10 +1343,10 @@ static void __exit unload_cpqphpd(void)
if (ctrl->hpc_reg) {
u16 misc;
rc = read_slot_enable (ctrl);
-
+
writeb(0, ctrl->hpc_reg + SLOT_SERR);
writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc &= 0xFFFD;
writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@ static void __exit unload_cpqphpd(void)
}
}
- // Stop the notification mechanism
+ /* Stop the notification mechanism */
if (initialized)
cpqhp_event_stop_thread();
- //unmap the rom address
+ /* unmap the rom address */
if (cpqhp_rom_start)
iounmap(cpqhp_rom_start);
if (smbios_start)
iounmap(smbios_start);
}
-
-
static struct pci_device_id hpcd_pci_tbl[] = {
{
/* handle any PCI Hotplug controller */
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
.class_mask = ~0,
-
+
/* no matter who makes it */
.vendor = PCI_ANY_ID,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
-
+
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
-
-
static struct pci_driver cpqhpc_driver = {
.name = "compaq_pci_hotplug",
.id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@ static struct pci_driver cpqhpc_driver = {
/* remove: cpqhpc_remove_one, */
};
-
-
static int __init cpqhpc_init(void)
{
int result;
@@ -1518,7 +1474,6 @@ static int __init cpqhpc_init(void)
return result;
}
-
static void __exit cpqhpc_cleanup(void)
{
dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@ static void __exit cpqhpc_cleanup(void)
cpqhp_shutdown_debugfs();
}
-
module_init(cpqhpc_init);
module_exit(cpqhpc_cleanup);
-
-
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8c4b11..2fa47af992a8 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x1L << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
/* this is the structure that tells the worker thread
- *what to do */
+ * what to do
+ */
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
ctrl->next_event = (ctrl->next_event + 1) % 10;
taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
- /**********************************
+ /*
* Switch opened
- **********************************/
+ */
func->switch_save = 0;
taskInfo->event_type = INT_SWITCH_OPEN;
} else {
- /**********************************
+ /*
* Switch closed
- **********************************/
+ */
func->switch_save = 0x10;
@@ -131,9 +132,8 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
{
struct slot *slot = ctrl->slot;
- while (slot && (slot->device != device)) {
+ while (slot && (slot->device != device))
slot = slot->next;
- }
return slot;
}
@@ -152,17 +152,17 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
if (!change)
return 0;
- /**********************************
+ /*
* Presence Change
- **********************************/
+ */
dbg("cpqsbd: Presence/Notify input change.\n");
dbg(" Changed bits are 0x%4.4x\n", change );
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x0101 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -177,22 +177,23 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
return 0;
/* If the switch closed, must be a button
- * If not in button mode, nevermind */
+ * If not in button mode, never mind
+ */
if (func->switch_save && (ctrl->push_button == 1)) {
temp_word = ctrl->ctrl_int_comp >> 16;
temp_byte = (temp_word >> hp_slot) & 0x01;
temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
if (temp_byte != func->presence_save) {
- /**************************************
+ /*
* button Pressed (doesn't do anything)
- **************************************/
+ */
dbg("hp_slot %d button pressed\n", hp_slot);
taskInfo->event_type = INT_BUTTON_PRESS;
} else {
- /**********************************
+ /*
* button Released - TAKE ACTION!!!!
- **********************************/
+ */
dbg("hp_slot %d button released\n", hp_slot);
taskInfo->event_type = INT_BUTTON_RELEASE;
@@ -210,7 +211,8 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
}
} else {
/* Switch is open, assume a presence change
- * Save the presence state */
+ * Save the presence state
+ */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
@@ -241,17 +243,17 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
if (!change)
return 0;
- /**********************************
+ /*
* power fault
- **********************************/
+ */
info("power fault interrupt\n");
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x01 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -262,16 +264,16 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
rc++;
if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
- /**********************************
+ /*
* power fault Cleared
- **********************************/
+ */
func->status = 0x00;
taskInfo->event_type = INT_POWER_FAULT_CLEAR;
} else {
- /**********************************
+ /*
* power fault
- **********************************/
+ */
taskInfo->event_type = INT_POWER_FAULT;
if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **h
- /* If we got here, there the bridge requires some of the resource, but
+ /* If we got here, the bridge requires some of the resource, but
- * we may be able to split some off of the front */
+ * we may be able to split some off of the front
+ */
node = *head;
if (node->length & (alignment -1)) {
/* this one isn't an aligned length, so we'll make a new entry
- * and split it up. */
+ * and split it up.
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -544,10 +548,10 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
if (!(*head))
return NULL;
- if ( cpqhp_resource_sort_and_combine(head) )
+ if (cpqhp_resource_sort_and_combine(head))
return NULL;
- if ( sort_by_size(head) )
+ if (sort_by_size(head))
return NULL;
for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
if (node->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -581,7 +586,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
/* Don't need to check if too small since we already did */
if (node->length > size) {
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -601,7 +607,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
continue;
/* If we got here, then it is the right size
- * Now take it out of the list and break */
+ * Now take it out of the list and break
+ */
if (*head == node) {
*head = node->next;
} else {
@@ -642,14 +649,16 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
return NULL;
for (max = *head; max; max = max->next) {
- /* If not big enough we could probably just bail,
- * instead we'll continue to the next. */
+ /* If not big enough we could probably just bail,
+ * instead we'll continue to the next.
+ */
if (max->length < size)
continue;
if (max->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (max->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
if ((max->base + max->length) & (size - 1)) {
/* this one isn't end aligned properly at the top
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -744,7 +754,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
if (node->base & (size - 1)) {
dbg("%s: not aligned\n", __func__);
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
if (node->length > size) {
dbg("%s: too big\n", __func__);
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -886,19 +898,19 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
u32 Diff;
u32 temp_dword;
-
+
misc = readw(ctrl->hpc_reg + MISC);
- /***************************************
+ /*
* Check to see if it was our interrupt
- ***************************************/
+ */
if (!(misc & 0x000C)) {
return IRQ_NONE;
}
if (misc & 0x0004) {
- /**********************************
+ /*
* Serial Output interrupt Pending
- **********************************/
+ */
/* Clear the interrupt */
misc |= 0x0004;
@@ -961,11 +973,8 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
struct pci_func *next;
new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
- if (new_slot == NULL) {
- /* I'm not dead yet!
- * You will be. */
+ if (new_slot == NULL)
return new_slot;
- }
new_slot->next = NULL;
new_slot->configured = 1;
@@ -996,10 +1005,8 @@ static int slot_remove(struct pci_func * old_slot)
return 1;
next = cpqhp_slot_list[old_slot->bus];
-
- if (next == NULL) {
+ if (next == NULL)
return 1;
- }
if (next == old_slot) {
cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@ static int slot_remove(struct pci_func * old_slot)
return 0;
}
- while ((next->next != old_slot) && (next->next != NULL)) {
+ while ((next->next != old_slot) && (next->next != NULL))
next = next->next;
- }
if (next->next == old_slot) {
next->next = old_slot->next;
@@ -1040,9 +1046,8 @@ static int bridge_slot_remove(struct pci_func *bridge)
for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
next = cpqhp_slot_list[tempBus];
- while (!slot_remove(next)) {
+ while (!slot_remove(next))
next = cpqhp_slot_list[tempBus];
- }
}
next = cpqhp_slot_list[bridge->bus];
@@ -1130,39 +1135,43 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
u16 reg16;
u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
-
+
if (ctrl->speed == adapter_speed)
return 0;
-
+
/* We don't allow freq/mode changes if we find another adapter running
- * in another slot on this controller */
+ * in another slot on this controller
+ */
for(slot = ctrl->slot; slot; slot = slot->next) {
- if (slot->device == (hp_slot + ctrl->slot_device_offset))
+ if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
if (!slot->hotplug_slot || !slot->hotplug_slot->info)
continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (slot->hotplug_slot->info->adapter_status == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
- * this rate if supported */
- if (ctrl->speed < adapter_speed)
+ * this rate if supported
+ */
+ if (ctrl->speed < adapter_speed)
return 0;
return 1;
}
-
+
/* If the controller doesn't support freq/mode changes and the
- * controller is running at a higher mode, we bail */
+ * controller is running at a higher mode, we bail
+ */
if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
return 1;
-
+
/* But we allow the adapter to run at a lower rate if possible */
if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
return 0;
/* We try to set the max speed supported by both the adapter and
- * controller */
+ * controller
+ */
if (ctrl->speed_capability < adapter_speed) {
if (ctrl->speed == ctrl->speed_capability)
return 0;
@@ -1171,22 +1180,22 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
-
- set_SOGO(ctrl);
+
+ set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
-
+
if (adapter_speed != PCI_SPEED_133MHz_PCIX)
reg = 0xF5;
else
- reg = 0xF4;
+ reg = 0xF4;
pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+
reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
reg16 &= ~0x000F;
switch(adapter_speed) {
- case(PCI_SPEED_133MHz_PCIX):
+ case(PCI_SPEED_133MHz_PCIX):
reg = 0x75;
- reg16 |= 0xB;
+ reg16 |= 0xB;
break;
case(PCI_SPEED_100MHz_PCIX):
reg = 0x74;
@@ -1203,48 +1212,48 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
default: /* 33MHz PCI 2.2 */
reg = 0x71;
break;
-
+
}
reg16 |= 0xB << 12;
writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
-
- mdelay(5);
-
+
+ mdelay(5);
+
/* Reenable interrupts */
writel(0, ctrl->hpc_reg + INT_MASK);
- pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+ pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
+
/* Restart state machine */
reg = ~0xF;
pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
-
+
/* Only if mode change...*/
if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
-
+
wait_for_ctrl_irq(ctrl);
mdelay(1100);
-
+
/* Restore LED/Slot state */
writel(leds, ctrl->hpc_reg + LED_CONTROL);
writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
-
+
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
ctrl->speed = adapter_speed;
slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- info("Successfully changed frequency/mode for adapter in slot %d\n",
+ info("Successfully changed frequency/mode for adapter in slot %d\n",
slot->number);
return 0;
}
-/* the following routines constitute the bulk of the
- hotplug controller logic
+/* the following routines constitute the bulk of the
+ * hotplug controller logic
*/
@@ -1268,17 +1277,17 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
hp_slot = func->device - ctrl->slot_device_offset;
- if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) {
- /**********************************
- * The switch is open.
- **********************************/
+ /*
+ * The switch is open.
+ */
+ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
rc = INTERLOCK_OPEN;
- } else if (is_slot_enabled (ctrl, hp_slot)) {
- /**********************************
- * The board is already on
- **********************************/
+ /*
+ * The board is already on
+ */
+ else if (is_slot_enabled (ctrl, hp_slot))
rc = CARD_FUNCTIONING;
- } else {
+ else {
mutex_lock(&ctrl->crit_sect);
/* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
* Get slot won't work for devices behind
* bridges, but in this case it will always be
* called for the "base" bus/dev/func of an
- * adapter. */
+ * adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1377,7 +1387,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
- * bus/dev/func of an adapter. */
+ * bus/dev/func of an adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1434,7 +1445,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
wait_for_ctrl_irq (ctrl);
/* Change bits in slot power register to force another shift out
- * NOTE: this is to work around the timer bug */
+ * NOTE: this is to work around the timer bug
+ */
temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
-
+
/* turn off board without attaching to the bus */
disable_slot_power (ctrl, hp_slot);
@@ -1461,7 +1473,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
if (rc)
return rc;
-
+
p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
/* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
}
/* All F's is an empty slot or an invalid board */
- if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */
+ if (temp_register != 0xFFFFFFFF) {
res_lists.io_head = ctrl->io_head;
res_lists.mem_head = ctrl->mem_head;
res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
index = 0;
do {
new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
- if (new_slot && !new_slot->pci_dev) {
+ if (new_slot && !new_slot->pci_dev)
cpqhp_configure_device(ctrl, new_slot);
- }
} while (new_slot);
mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@ static void interrupt_event_handler(struct controller *ctrl)
info(msg_button_on, p_slot->number);
}
mutex_lock(&ctrl->crit_sect);
-
+
dbg("blink green LED and turn off amber\n");
-
+
amber_LED_off (ctrl, hp_slot);
green_LED_blink (ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
if (cpqhp_process_SI(ctrl, func) != 0) {
amber_LED_on(ctrl, hp_slot);
green_LED_off(ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
struct pci_bus *pci_bus = ctrl->pci_bus;
int physical_slot=0;
- device = func->device;
+ device = func->device;
func = cpqhp_slot_find(ctrl->bus, device, index++);
p_slot = cpqhp_find_slot(ctrl, device);
if (p_slot) {
@@ -2113,9 +2124,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
/* If the VGA Enable bit is set, remove isn't
* supported */
- if (BCR & PCI_BRIDGE_CTL_VGA) {
+ if (BCR & PCI_BRIDGE_CTL_VGA)
rc = REMOVE_NOT_SUPPORTED;
- }
}
}
@@ -2183,67 +2193,67 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
switch (test_num) {
- case 1:
- /* Do stuff here! */
-
- /* Do that funky LED thing */
- /* so we can restore them later */
- save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
- work_LED = 0x01010101;
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
-
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- work_LED = 0x00000101;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ case 1:
+ /* Do stuff here! */
+
+ /* Do that funky LED thing */
+ /* so we can restore them later */
+ save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
+ work_LED = 0x01010101;
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ work_LED = 0x00000101;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ for (loop = 0; loop < num_of_slots; loop++) {
+ set_SOGO(ctrl);
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- for (loop = 0; loop < num_of_slots; loop++) {
- set_SOGO(ctrl);
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED >> 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED >> 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
-
- set_SOGO(ctrl);
+ set_SOGO(ctrl);
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED << 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- work_LED = work_LED << 1;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- }
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED << 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ work_LED = work_LED << 1;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ }
- /* put it back the way it was */
- writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
+ /* put it back the way it was */
+ writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
- set_SOGO(ctrl);
+ set_SOGO(ctrl);
- /* Wait for SOBS to be unset */
- wait_for_ctrl_irq (ctrl);
- break;
- case 2:
- /* Do other stuff here! */
- break;
- case 3:
- /* and more... */
- break;
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+ break;
+ case 2:
+ /* Do other stuff here! */
+ break;
+ case 3:
+ /* and more... */
+ break;
}
return 0;
}
@@ -2312,9 +2322,9 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
while ((function < max_functions) && (!stop_it)) {
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
- if (ID == 0xFFFFFFFF) { /* There's nothing there. */
+ if (ID == 0xFFFFFFFF) {
function++;
- } else { /* There's something there */
+ } else {
/* Setup slot structure. */
new_slot = cpqhp_slot_create(func->bus);
@@ -2339,8 +2349,8 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
/*
- Configuration logic that involves the hotplug data structures and
- their bookkeeping
+ * Configuration logic that involves the hotplug data structures and
+ * their bookkeeping
*/
@@ -2393,7 +2403,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
if (rc)
return rc;
- if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
+ if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* set Primary bus */
dbg("set Primary bus = %d\n", func->bus);
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
temp_resources.irqs = &irqs;
/* Make copies of the nodes we are going to pass down so that
- * if there is a problem,we can just use these to free resources */
+ * if there is a problem, we can just use these to free resources
+ */
hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
- /* Adjust this to compensate for extra adjustment in first loop */
+ /* Adjust this to compensate for extra adjustment in first loop
+ */
irqs.barber_pole--;
rc = 0;
@@ -2917,27 +2929,26 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
} /* End of base register loop */
if (cpqhp_legacy_mode) {
/* Figure out which interrupt pin this function uses */
- rc = pci_bus_read_config_byte (pci_bus, devfn,
+ rc = pci_bus_read_config_byte (pci_bus, devfn,
PCI_INTERRUPT_PIN, &temp_byte);
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
* already mapped, set this one the same */
- if (temp_byte && resources->irqs &&
- (resources->irqs->valid_INT &
+ if (temp_byte && resources->irqs &&
+ (resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
/* We have to share with something already set up */
- IRQ = resources->irqs->interrupt[(temp_byte +
+ IRQ = resources->irqs->interrupt[(temp_byte +
resources->irqs->barber_pole - 1) & 0x03];
} else {
/* Program IRQ based on card type */
rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
- if (class_code == PCI_BASE_CLASS_STORAGE) {
+ if (class_code == PCI_BASE_CLASS_STORAGE)
IRQ = cpqhp_disk_irq;
- } else {
+ else
IRQ = cpqhp_nic_irq;
- }
}
/* IRQ Line */
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb174888002b..76ba8a1c774d 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@ static u8 evbuffer[1024];
static void __iomem *compaq_int15_entry_point;
-static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */
+/* lock for ordering int15_bios_call() */
+static spinlock_t int15_lock;
/* This is a series of function that deals with
- setting & getting the hotplug resource table in some environment variable.
-*/
+ * setting & getting the hotplug resource table in some environment variable.
+ */
/*
* We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
if ((*used + 1) > *avail)
return(1);
-
+
*((u8*)*p_buffer) = value;
tByte = (u8**)p_buffer;
(*tByte)++;
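The add_byte()/add_dword() helpers in this hunk follow one pattern: check that the value still fits, store it at the current fill pointer, then advance the pointer and the used count. Below is a condensed sketch of that pattern using an explicit cursor; the struct and function names are illustrative and do not appear in the driver.

	#include <linux/string.h>
	#include <linux/types.h>

	struct ev_cursor {
		u8 *fill;	/* next byte to write */
		u32 used;	/* bytes written so far */
		u32 avail;	/* total buffer capacity */
	};

	/* Append one byte; returns 1 if the buffer would overflow, 0 on success. */
	static int ev_add_byte(struct ev_cursor *cur, u8 value)
	{
		if (cur->used + 1 > cur->avail)
			return 1;
		*cur->fill++ = value;
		cur->used++;
		return 0;
	}

	/* Append a dword in native byte order the same way. */
	static int ev_add_dword(struct ev_cursor *cur, u32 value)
	{
		if (cur->used + 4 > cur->avail)
			return 1;
		memcpy(cur->fill, &value, sizeof(value));
		cur->fill += sizeof(value);
		cur->used += sizeof(value);
		return 0;
	}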
@@ -170,10 +171,10 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
unsigned long flags;
int op = operation;
int ret_val;
-
+
if (!compaq_int15_entry_point)
return -ENODEV;
-
+
spin_lock_irqsave(&int15_lock, flags);
__asm__ (
"xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
"D" (buffer), "m" (compaq_int15_entry_point)
: "%ebx", "%edx");
spin_unlock_irqrestore(&int15_lock, flags);
-
+
return((ret_val & 0xFF00) >> 8);
}
@@ -210,14 +211,16 @@ static int load_HRT (void __iomem *rom_start)
available = 1024;
- // Now load the EV
+ /* Now load the EV */
temp_dword = available;
rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
evbuffer_length = temp_dword;
- // We're maintaining the resource lists so write FF to invalidate old info
+ /* We're maintaining the resource lists so write FF to invalidate old
+ * info
+ */
temp_dword = 1;
rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@ static u32 store_HRT (void __iomem *rom_start)
p_EV_header = (struct ev_hrt_header *) pFill;
ctrl = cpqhp_ctrl_list;
-
- // The revision of this structure
+
+ /* The revision of this structure */
rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
if (rc)
return(rc);
- // The number of controllers
+ /* The number of controllers */
rc = add_byte( &pFill, 1, &usedbytes, &available);
if (rc)
return(rc);
@@ -279,27 +282,27 @@ static u32 store_HRT (void __iomem *rom_start)
numCtrl++;
- // The bus number
+ /* The bus number */
rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
if (rc)
return(rc);
- // The device Number
+ /* The device Number */
rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // The function Number
+ /* The function Number */
rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // Skip the number of available entries
+ /* Skip the number of available entries */
rc = add_dword( &pFill, 0, &usedbytes, &available);
if (rc)
return(rc);
- // Figure out memory Available
+ /* Figure out memory Available */
resNode = ctrl->mem_head;
@@ -308,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -321,10 +324,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->mem_avail = loop;
- // Figure out prefetchable memory Available
+ /* Figure out prefetchable memory Available */
resNode = ctrl->p_mem_head;
@@ -333,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -346,10 +349,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->p_mem_avail = loop;
- // Figure out IO Available
+ /* Figure out IO Available */
resNode = ctrl->io_head;
@@ -358,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -371,10 +374,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->io_avail = loop;
- // Figure out bus Available
+ /* Figure out bus Available */
resNode = ctrl->bus_head;
@@ -383,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -396,15 +399,15 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->bus_avail = loop;
ctrl = ctrl->next;
}
-
+
p_EV_header->num_of_ctrl = numCtrl;
- // Now store the EV
+ /* Now store the EV */
temp_dword = usedbytes;
@@ -449,20 +452,21 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
struct ev_hrt_header *p_EV_header;
if (!evbuffer_init) {
- // Read the resource list information in from NVRAM
+ /* Read the resource list information in from NVRAM */
if (load_HRT(rom_start))
memset (evbuffer, 0, 1024);
evbuffer_init = 1;
}
- // If we saved information in NVRAM, use it now
+ /* If we saved information in NVRAM, use it now */
p_EV_header = (struct ev_hrt_header *) evbuffer;
- // The following code is for systems where version 1.0 of this
- // driver has been loaded, but doesn't support the hardware.
- // In that case, the driver would incorrectly store something
- // in NVRAM.
+ /* The following code is for systems where version 1.0 of this
+ * driver has been loaded, but doesn't support the hardware.
+ * In that case, the driver would incorrectly store something
+ * in NVRAM.
+ */
if ((p_EV_header->Version == 2) ||
((p_EV_header->Version == 1) && !ctrl->push_flag)) {
p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
function = p_ev_ctrl->function;
while ((bus != ctrl->bus) ||
- (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
+ (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
(function != PCI_FUNC(ctrl->pci_dev->devfn))) {
nummem = p_ev_ctrl->mem_avail;
numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
return 2;
- // Skip forward to the next entry
+ /* Skip forward to the next entry */
p_byte += (nummem + numpmem + numio + numbus) * 8;
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
@@ -629,8 +633,9 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
ctrl->bus_head = bus_node;
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
if (rc)
return(rc);
} else {
- if ((evbuffer[0] != 0) && (!ctrl->push_flag))
+ if ((evbuffer[0] != 0) && (!ctrl->push_flag))
return 1;
}
return 0;
}
-
+
int compaq_nvram_store (void __iomem *rom_start)
{
int rc = 1;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0fcb8ee..6173b9a4544e 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
#include "../pci.h"
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
}
-int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
+int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
{
unsigned char bus;
struct pci_bus *child;
int num;
if (func->pci_dev == NULL)
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function));
/* No pci device, we need to create it then */
if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
if (num)
pci_bus_add_devices(ctrl->pci_dev->bus);
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
if (func->pci_dev == NULL) {
dbg("ERROR: pci_dev still null\n");
return 0;
@@ -112,20 +111,24 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
pci_do_scan_bus(child);
}
+ pci_dev_put(func->pci_dev);
+
return 0;
}
-int cpqhp_unconfigure_device(struct pci_func* func)
+int cpqhp_unconfigure_device(struct pci_func* func)
{
int j;
-
+
dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j));
- if (temp)
+ struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ if (temp) {
+ pci_dev_put(temp);
pci_remove_bus_device(temp);
+ }
}
return 0;
}
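Both functions above switch from pci_find_slot(), which returned an unreferenced pointer, to pci_get_bus_and_slot(), which takes a reference that must be released with pci_dev_put() once the device is no longer needed. A minimal sketch of that get/use/put pattern follows, assuming the usual <linux/pci.h> API; the helper itself is illustrative and not part of the driver.

	#include <linux/errno.h>
	#include <linux/pci.h>

	/* Read the vendor ID of a function, holding a reference only briefly. */
	static int read_func_vendor(u8 bus, u8 device, u8 function, u16 *vendor)
	{
		struct pci_dev *dev;

		dev = pci_get_bus_and_slot(bus, PCI_DEVFN(device, function));
		if (!dev)
			return -ENODEV;		/* nothing at that bus/devfn */

		*vendor = dev->vendor;		/* safe while the reference is held */

		pci_dev_put(dev);		/* balances pci_get_bus_and_slot() */
		return 0;
	}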
@@ -178,32 +181,22 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
if (!rc)
return !rc;
- // set the Edge Level Control Register (ELCR)
+ /* set the Edge Level Control Register (ELCR) */
temp_word = inb(0x4d0);
temp_word |= inb(0x4d1) << 8;
temp_word |= 0x01 << irq_num;
- // This should only be for x86 as it sets the Edge Level Control Register
- outb((u8) (temp_word & 0xFF), 0x4d0);
- outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
- rc = 0;
- }
+ /* This should only be for x86 as it sets the Edge Level
+ * Control Register
+ */
+ outb((u8) (temp_word & 0xFF), 0x4d0);
+ outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
+ rc = 0;
+ }
return rc;
}
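For context on the hunk above: the ELCR at I/O ports 0x4d0/0x4d1 holds one trigger-mode bit per legacy IRQ (IRQs 0-7 in 0x4d0, 8-15 in 0x4d1; a set bit means level-triggered). A standalone sketch of the read-modify-write the driver performs, assuming x86 port I/O from <asm/io.h>:

	#include <asm/io.h>
	#include <linux/types.h>

	/* Mark irq_num as level-triggered in the ELCR (x86 only, illustrative). */
	static void elcr_set_level_irq(unsigned int irq_num)
	{
		u16 elcr;

		elcr = inb(0x4d0);		/* IRQs 0-7 */
		elcr |= inb(0x4d1) << 8;	/* IRQs 8-15 */
		elcr |= 1 << irq_num;		/* 1 = level, 0 = edge */

		outb(elcr & 0xff, 0x4d0);
		outb((elcr >> 8) & 0xff, 0x4d1);
	}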
-/*
- * WTF??? This function isn't in the code, yet a function calls it, but the
- * compiler optimizes it away? strange. Here as a placeholder to keep the
- * compiler happy.
- */
-static int PCI_ScanBusNonBridge (u8 bus, u8 device)
-{
- return 0;
-}
-
static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
{
u16 tdevice;
@@ -213,11 +206,11 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. Not a bridge ?
+ /* Yep we got one. Not a bridge ? */
if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
*dev_num = tdevice;
dbg("found it !\n");
@@ -225,16 +218,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
}
}
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. bridge ?
+ /* Yep we got one. bridge ? */
if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
+ /* XXX: no recursion, wtf? */
dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
- if (PCI_ScanBusNonBridge(tbus, tdevice) == 0)
- return 0;
+ return 0;
}
}
@@ -244,39 +237,23 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
- long len;
- long loop;
+ int loop, len;
u32 work;
-
u8 tbus, tdevice, tslot;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength );
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if (tslot == slot) {
*bus_num = tbus;
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff)) {
- kfree(PCIIRQRoutingInfoLength );
+ if (!nobridge || (work == 0xffffffff))
return 0;
- }
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
dbg("Scan bus for Non Bridge: bus %d\n", tbus);
if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
*bus_num = tbus;
- kfree(PCIIRQRoutingInfoLength );
return 0;
}
- } else {
- kfree(PCIIRQRoutingInfoLength );
+ } else
return 0;
- }
-
}
}
- kfree(PCIIRQRoutingInfoLength );
return -1;
}
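The hunk above stops calling pcibios_get_irq_routing_table() (and kfree()-ing the result) on every lookup and instead indexes a table the driver fetches once and keeps in cpqhp_routing_table, with cpqhp_routing_table_length() reporting the number of slot entries. A hedged sketch of how such a cache could be set up and torn down; the init/exit helpers are assumptions, not code from this patch.

	#include <linux/errno.h>
	#include <linux/pci.h>
	#include <linux/slab.h>

	static struct irq_routing_table *cpqhp_routing_table;

	/* Fetch the BIOS $PIR routing table once (illustrative init-time helper). */
	static int cpqhp_routing_table_init(void)
	{
		cpqhp_routing_table = pcibios_get_irq_routing_table();
		if (!cpqhp_routing_table)
			return -ENODEV;
		return 0;
	}

	/* Number of struct irq_info entries in the cached table. */
	static int cpqhp_routing_table_length(void)
	{
		return (cpqhp_routing_table->size - sizeof(struct irq_routing_table))
			/ sizeof(struct irq_info);
	}

	/* Release the kmalloc()ed copy returned by pcibios_get_irq_routing_table(). */
	static void cpqhp_routing_table_exit(void)
	{
		kfree(cpqhp_routing_table);
		cpqhp_routing_table = NULL;
	}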
int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
{
- return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed)
+ /* plain (bridges allowed) */
+ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
}
-/* More PCI configuration routines; this time centered around hotplug controller */
+/* More PCI configuration routines; this time centered around hotplug
+ * controller
+ */
/*
@@ -339,12 +314,12 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
int stop_it;
int index;
- // Decide which slots are supported
+ /* Decide which slots are supported */
if (is_hot_plug) {
- //*********************************
- // is_hot_plug is the slot mask
- //*********************************
+ /*
+ * is_hot_plug is the slot mask
+ */
FirstSupported = is_hot_plug >> 4;
LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
} else {
@@ -352,123 +327,127 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
LastSupported = 0x1F;
}
- // Save PCI configuration space for all devices in supported slots
+ /* Save PCI configuration space for all devices in supported slots */
ctrl->pci_bus->number = busnumber;
for (device = FirstSupported; device <= LastSupported; device++) {
ID = 0xFFFFFFFF;
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF) {
+ if (is_hot_plug) {
+ /* Setup slot structure with entry for empty
+ * slot
+ */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
- if (ID != 0xFFFFFFFF) { // device in slot
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
- if (rc)
- return rc;
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = 0;
+ new_slot->is_a_board = 0;
+ new_slot->presence_save = 0;
+ new_slot->switch_save = 0;
+ }
+ continue;
+ }
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
+ if (rc)
+ return rc;
- // If multi-function device, set max_functions to 8
- if (header_type & 0x80)
- max_functions = 8;
- else
- max_functions = 1;
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
- function = 0;
+ /* If multi-function device, set max_functions to 8 */
+ if (header_type & 0x80)
+ max_functions = 8;
+ else
+ max_functions = 1;
- do {
- DevError = 0;
+ function = 0;
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge
- // Recurse the subordinate bus
- // get the subordinate bus number
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
- if (rc) {
+ do {
+ DevError = 0;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus
+ * get the subordinate bus number
+ */
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (rc) {
+ return rc;
+ } else {
+ sub_bus = (int) secondary_bus;
+
+ /* Save secondary bus cfg spc
+ * with this recursive call.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
return rc;
- } else {
- sub_bus = (int) secondary_bus;
-
- // Save secondary bus cfg spc
- // with this recursive call.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return rc;
- ctrl->pci_bus->number = busnumber;
- }
+ ctrl->pci_bus->number = busnumber;
}
+ }
- index = 0;
+ index = 0;
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
+ while (new_slot &&
+ (new_slot->function != (u8) function))
new_slot = cpqhp_slot_find(busnumber, device, index++);
- while (new_slot &&
- (new_slot->function != (u8) function))
- new_slot = cpqhp_slot_find(busnumber, device, index++);
- if (!new_slot) {
- // Setup slot structure.
- new_slot = cpqhp_slot_create(busnumber);
-
- if (new_slot == NULL)
- return(1);
- }
-
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = (u8) function;
- new_slot->is_a_board = 1;
- new_slot->switch_save = 0x10;
- // In case of unsupported board
- new_slot->status = DevError;
- new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
-
- for (cloop = 0; cloop < 0x20; cloop++) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- if (rc)
- return rc;
- }
+ if (!new_slot) {
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
+ }
- function++;
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = (u8) function;
+ new_slot->is_a_board = 1;
+ new_slot->switch_save = 0x10;
+ /* In case of unsupported board */
+ new_slot->status = DevError;
+ new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
- stop_it = 0;
+ for (cloop = 0; cloop < 0x20; cloop++) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
+ if (rc)
+ return rc;
+ }
- // this loop skips to the next present function
- // reading in Class Code and Header type.
+ pci_dev_put(new_slot->pci_dev);
- while ((function < max_functions)&&(!stop_it)) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
- if (rc)
- return rc;
+ function++;
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
+ stop_it = 0;
- stop_it++;
- }
+ /* this loop skips to the next present function
+ * reading in Class Code and Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
+ if (ID == 0xFFFFFFFF) {
+ function++;
+ continue;
}
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
+ if (rc)
+ return rc;
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else if (is_hot_plug) {
- // Setup slot structure with entry for empty slot
- new_slot = cpqhp_slot_create(busnumber);
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
- if (new_slot == NULL) {
- return(1);
+ stop_it++;
}
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = 0;
- new_slot->is_a_board = 0;
- new_slot->presence_save = 0;
- new_slot->switch_save = 0;
- }
- } // End of FOR loop
+ } while (function < max_functions);
+ } /* End of FOR loop */
- return(0);
+ return 0;
}
@@ -489,7 +468,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
u8 secondary_bus;
int sub_bus;
int max_functions;
- int function;
+ int function = 0;
int cloop = 0;
int stop_it;
@@ -498,63 +477,58 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
ctrl->pci_bus->number = new_slot->bus;
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
- if (ID != 0xFFFFFFFF) { // device in slot
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
-
- if (header_type & 0x80) // Multi-function device
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Recurse the subordinate bus
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (ID == 0xFFFFFFFF)
+ return 2;
- sub_bus = (int) secondary_bus;
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
- // Save the config headers for the secondary bus.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return(rc);
- ctrl->pci_bus->number = new_slot->bus;
+ if (header_type & 0x80) /* Multi-function device */
+ max_functions = 8;
+ else
+ max_functions = 1;
- } // End of IF
+ while (function < max_functions) {
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus */
+ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
- new_slot->status = 0;
+ sub_bus = (int) secondary_bus;
- for (cloop = 0; cloop < 0x20; cloop++) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- }
+ /* Save the config headers for the secondary
+ * bus.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
+ return(rc);
+ ctrl->pci_bus->number = new_slot->bus;
- function++;
+ }
- stop_it = 0;
+ new_slot->status = 0;
- // this loop skips to the next present function
- // reading in the Class Code and the Header type.
+ for (cloop = 0; cloop < 0x20; cloop++)
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- while ((function < max_functions) && (!stop_it)) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
+ function++;
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ stop_it = 0;
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ /* this loop skips to the next present function
+ * reading in the Class Code and the Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
- stop_it++;
- }
+ if (ID == 0xFFFFFFFF)
+ function++;
+ else {
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ stop_it++;
}
+ }
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else {
- return 2;
}
return 0;
@@ -590,11 +564,10 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- // PCI-PCI Bridge
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
}
pci_bus->number = func->bus;
- //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together
- // Figure out IO and memory base lengths
+ /* FIXME: this loop is duplicated in the non-bridge
+ * case. The two could be rolled together. Figure out
+ * IO and memory base lengths.
+ */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
-
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -637,32 +614,36 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] =
base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
-
- } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge
- // Figure out IO and memory base lengths
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // base = amount of IO space requested
+ /* IO base
+ * base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
- // base = amount of memory space requested
+ /* memory base
+ * base = amount of memory
+ * space requested
+ */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -673,16 +654,16 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] = base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
- } else { // Some other unknown header type
+ } else { /* Some other unknown header type */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -728,18 +709,18 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Save the command register
+ /* Save the command register */
pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
- // disable card
+ /* disable card */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Clear Bridge Control Register
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Clear Bridge Control Register */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
bus_node->next = func->bus_head;
func->bus_head = bus_node;
- // Save IO base and Limit registers
+ /* Save IO base and Limit registers */
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
@@ -771,7 +752,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
func->io_head = io_node;
}
- // Save memory base and Limit registers
+ /* Save memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
@@ -787,7 +768,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
func->mem_head = mem_node;
}
- // Save prefetchable memory base and Limit registers
+ /* Save prefetchable memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
@@ -802,7 +783,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
p_mem_node->next = func->p_mem_head;
func->p_mem_head = p_mem_node;
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
@@ -812,11 +793,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -834,7 +818,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -851,7 +835,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -868,9 +852,10 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
return(1);
}
- } // End of base register loop
- } else if ((header_type & 0x7F) == 0x00) { // Standard header
- // Figure out IO and memory base lengths
+ } /* End of base register loop */
+ /* Standard header */
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
@@ -880,11 +865,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -901,7 +889,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -918,7 +906,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -935,15 +923,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
return(1);
}
- } // End of base register loop
- } else { // Some other unknown header type
+ } /* End of base register loop */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
- return(0);
+ return 0;
}
@@ -975,16 +962,16 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Start at the top of config space so that the control
- // registers are programmed last
- for (cloop = 0x3C; cloop > 0; cloop -= 4) {
+ /* Start at the top of config space so that the control
+ * registers are programmed last
+ */
+ for (cloop = 0x3C; cloop > 0; cloop -= 4)
pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
- }
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- // If this is a bridge device, restore subordinate devices
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
+ /* If this is a bridge device, restore subordinate devices */
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
}
} else {
- // Check all the base Address Registers to make sure
- // they are the same. If not, the board is different.
+ /* Check all the base Address Registers to make sure
+ * they are the same. If not, the board is different.
+ */
for (cloop = 16; cloop < 40; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
@@ -1058,27 +1046,28 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
- // No adapter present
+ /* No adapter present */
if (temp_register == 0xFFFFFFFF)
return(NO_ADAPTER_PRESENT);
if (temp_register != func->config_space[0])
return(ADAPTER_NOT_SAME);
- // Check for same revision number and class code
+ /* Check for same revision number and class code */
pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
- // Adapter not the same
+ /* Adapter not the same */
if (temp_register != func->config_space[0x08 >> 2])
return(ADAPTER_NOT_SAME);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // In order to continue checking, we must program the
- // bus registers in the bridge to respond to accesses
- // for it's subordinate bus(es)
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* In order to continue checking, we must program the
+ * bus registers in the bridge to respond to accesses
+ * for its subordinate bus(es)
+ */
temp_register = func->config_space[0x18 >> 2];
pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
}
}
- // Check to see if it is a standard config header
+ /* Check to see if it is a standard config header */
else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
- // Check subsystem vendor and ID
+ /* Check subsystem vendor and ID */
pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
if (temp_register != func->config_space[0x2C >> 2]) {
- // If it's a SMART-2 and the register isn't filled
- // in, ignore the difference because
- // they just have an old rev of the firmware
-
+ /* If it's a SMART-2 and the register isn't
+ * filled in, ignore the difference because
+ * they just have an old rev of the firmware
+ */
if (!((func->config_space[0] == 0xAE100E11)
&& (temp_register == 0x00L)))
return(ADAPTER_NOT_SAME);
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO
+ * space requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -1135,23 +1128,24 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Check information in slot structure
+ /* Check information in slot structure */
if (func->base_length[(cloop - 0x10) >> 2] != base)
return(ADAPTER_NOT_SAME);
if (func->base_type[(cloop - 0x10) >> 2] != type)
return(ADAPTER_NOT_SAME);
- } // End of base register loop
+ } /* End of base register loop */
- } // End of (type 0 config space) else
+ } /* End of (type 0 config space) else */
else {
- // this is not a type 0 or 1 config space header so
- // we don't know how to do it
+ /* this is not a type 0 or 1 config space header so
+ * we don't know how to do it
+ */
return(DEVICE_TYPE_NOT_SUPPORTED);
}
- // Get the next function
+ /* Get the next function */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -1168,7 +1162,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
* this function is for hot plug ADD!
*
* returns 0 if success
- */
+ */
int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
{
u8 temp;
@@ -1187,10 +1181,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
dbg("rom_resource_table = %p\n", rom_resource_table);
- if (rom_resource_table == NULL) {
+ if (rom_resource_table == NULL)
return -ENODEV;
- }
- // Sum all resources and setup resource maps
+
+ /* Sum all resources and setup resource maps */
unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
dbg("unused_IRQ = %x\n", unused_IRQ);
@@ -1222,13 +1216,11 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
temp = 0;
- if (!cpqhp_nic_irq) {
+ if (!cpqhp_nic_irq)
cpqhp_nic_irq = ctrl->cfgspc_irq;
- }
- if (!cpqhp_disk_irq) {
+ if (!cpqhp_disk_irq)
cpqhp_disk_irq = ctrl->cfgspc_irq;
- }
dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
@@ -1262,13 +1254,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
primary_bus, secondary_bus, max_bus);
- // If this entry isn't for our controller's bus, ignore it
+ /* If this entry isn't for our controller's bus, ignore it */
if (primary_bus != ctrl->bus) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // find out if this entry is for an occupied slot
+ /* find out if this entry is for an occupied slot */
ctrl->pci_bus->number = primary_bus;
pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
}
- // If we can't find a match, skip this table entry
+ /* If we can't find a match, skip this table entry */
if (!func) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // this may not work and shouldn't be used
+ /* this may not work and shouldn't be used */
if (secondary_bus != primary_bus)
bridged_slot = 1;
else
@@ -1301,7 +1293,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
- // If we've got a valid IO base, use it
+ /* If we've got a valid IO base, use it */
temp_dword = io_base + io_length;
@@ -1325,7 +1317,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid memory base, use it
+ /* If we've got a valid memory base, use it */
temp_dword = mem_base + mem_length;
if ((mem_base) && (temp_dword < 0x10000)) {
mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid prefetchable memory base, and
- // the base + length isn't greater than 0xFFFF
+ /* If we've got a valid prefetchable memory base, and
+ * the base + length isn't greater than 0xFFFF
+ */
temp_dword = pre_mem_base + pre_mem_length;
if ((pre_mem_base) && (temp_dword < 0x10000)) {
p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid bus number, use it
- // The second condition is to ignore bus numbers on
- // populated slots that don't have PCI-PCI bridges
+ /* If we've got a valid bus number, use it
+ * The second condition is to ignore bus numbers on
+ * populated slots that don't have PCI-PCI bridges
+ */
if (secondary_bus && (secondary_bus != primary_bus)) {
bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
if (!bus_node)
@@ -1398,8 +1392,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
one_slot += sizeof (struct slot_rt);
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 42e4260c3b12..7485ffda950c 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1318,7 +1318,6 @@ error:
}
struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
@@ -1421,3 +1420,4 @@ static void __exit ibmphp_exit(void)
}
module_init(ibmphp_init);
+module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0f07f9..5c5043f239cf 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
.store = test_write_file
};
-static int has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_latch_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_adapter_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_max_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_max_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_cur_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->hardware_test)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
static int fs_add_slot(struct pci_slot *slot)
{
int retval = 0;
- if (has_power_file(slot) == 0) {
- retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ /* Create symbolic link to the hotplug driver module */
+ pci_hp_create_module_link(slot);
+
+ if (has_power_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(slot) == 0) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(slot) == 0) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(slot) == 0) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_max_bus_speed_file(slot) == 0) {
+ if (has_max_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
+ &hotplug_slot_attr_max_bus_speed.attr);
if (retval)
goto exit_max_speed;
}
- if (has_cur_bus_speed_file(slot) == 0) {
+ if (has_cur_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
+ &hotplug_slot_attr_cur_bus_speed.attr);
if (retval)
goto exit_cur_speed;
}
- if (has_test_file(slot) == 0) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -475,55 +479,61 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit;
exit_test:
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
-
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
exit_cur_speed:
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
-
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
exit_max_speed:
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
-
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
-
exit_latch:
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
-
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
+ pci_hp_remove_module_link(slot);
exit:
return retval;
}
static void fs_remove_slot(struct pci_slot *slot)
{
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
- if (has_test_file(slot) == 0)
+ if (has_test_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+
+ pci_hp_remove_module_link(slot);
}
static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,19 +550,22 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
}
/**
- * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
+ * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
* @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
- * @slot_nr: slot number
+ * @devnr: device number
* @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
* userspace interaction to the slot.
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
- const char *name)
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name,
+ struct module *owner, const char *mod_name)
{
int result;
struct pci_slot *pci_slot;
@@ -567,14 +580,16 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
return -EINVAL;
}
- mutex_lock(&pci_hp_mutex);
+ slot->ops->owner = owner;
+ slot->ops->mod_name = mod_name;
+ mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
- pci_slot = pci_create_slot(bus, slot_nr, name, slot);
+ pci_slot = pci_create_slot(bus, devnr, name, slot);
if (IS_ERR(pci_slot)) {
result = PTR_ERR(pci_slot);
goto out;
@@ -684,6 +699,6 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-EXPORT_SYMBOL_GPL(pci_hp_register);
+EXPORT_SYMBOL_GPL(__pci_hp_register);
EXPORT_SYMBOL_GPL(pci_hp_deregister);
EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
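With __pci_hp_register() now taking the owner module and module name explicitly (and the ibmphp/pciehp hunks dropping their .owner = THIS_MODULE initialisers), callers are expected to go through a thin wrapper that supplies both automatically. A plausible sketch of such a wrapper macro, assuming the usual THIS_MODULE/KBUILD_MODNAME convention; the exact definition lives in pci_hotplug.h and may differ.

	/* Illustrative wrapper: record the calling module at registration time. */
	#define pci_hp_register(slot, pbus, devnr, name) \
		__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)

This keeps the ownership information next to the registration call instead of in each driver's hotplug_slot_ops.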
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 0a368547e633..e6cf096498be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -81,7 +81,6 @@ struct slot {
struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
- unsigned long last_emi_toggle;
struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -203,8 +202,6 @@ struct hpc_ops {
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_emi_status)(struct slot *slot, u8 *status);
- int (*toggle_emi)(struct slot *slot);
int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fb254b2454de..2317557fdee6 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -85,99 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
.get_cur_bus_speed = get_cur_bus_speed,
};
-/*
- * Check the status of the Electro Mechanical Interlock (EMI)
- */
-static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- return (slot->hpc_ops->get_emi_status(slot, value));
-}
-
-/*
- * sysfs interface for the Electro Mechanical Interlock (EMI)
- * 1 == locked, 0 == unlocked
- */
-static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
-{
- int retval;
- u8 value;
-
- retval = get_lock_status(slot, &value);
- if (retval)
- goto lock_read_exit;
- retval = sprintf (buf, "%d\n", value);
-
-lock_read_exit:
- return retval;
-}
-
-/*
- * Change the status of the Electro Mechanical Interlock (EMI)
- * This is a toggle - in addition there must be at least 1 second
- * in between toggles.
- */
-static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
-{
- struct slot *slot = hotplug_slot->private;
- int retval;
- u8 value;
-
- mutex_lock(&slot->ctrl->crit_sect);
-
- /* has it been >1 sec since our last toggle? */
- if ((get_seconds() - slot->last_emi_toggle) < 1) {
- mutex_unlock(&slot->ctrl->crit_sect);
- return -EINVAL;
- }
-
- /* see what our current state is */
- retval = get_lock_status(hotplug_slot, &value);
- if (retval || (value == status))
- goto set_lock_exit;
-
- slot->hpc_ops->toggle_emi(slot);
-set_lock_exit:
- mutex_unlock(&slot->ctrl->crit_sect);
- return 0;
-}
-
-/*
- * sysfs interface which allows the user to toggle the Electro Mechanical
- * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
- */
-static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
- const char *buf, size_t count)
-{
- struct slot *slot = hotplug_slot->private;
- unsigned long llock;
- u8 lock;
- int retval = 0;
-
- llock = simple_strtoul(buf, NULL, 10);
- lock = (u8)(llock & 0xff);
-
- switch (lock) {
- case 0:
- case 1:
- retval = set_lock_status(hotplug_slot, lock);
- break;
- default:
- ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
- lock);
- retval = -EINVAL;
- }
- if (retval)
- return retval;
- return count;
-}
-
-static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
- .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
- .show = lock_read_file,
- .store = lock_write_file
-};
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -236,17 +142,6 @@ static int init_slots(struct controller *ctrl)
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
- /* create additional sysfs entries */
- if (EMI(ctrl)) {
- retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
- if (retval) {
- pci_hp_deregister(hotplug_slot);
- ctrl_err(ctrl, "Cannot create additional sysfs "
- "entries\n");
- goto error_info;
- }
- }
}
return 0;
@@ -261,13 +156,8 @@ error:
static void cleanup_slots(struct controller *ctrl)
{
struct slot *slot;
-
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- if (EMI(ctrl))
- sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
+ list_for_each_entry(slot, &ctrl->slot_list, slot_list)
pci_hp_deregister(slot->hotplug_slot);
- }
}
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 07bd32151146..52813257e5bf 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@ static int hpc_query_power_fault(struct slot *slot)
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-static int hpc_get_emi_status(struct slot *slot, u8 *status)
-{
- struct controller *ctrl = slot->ctrl;
- u16 slot_status;
- int retval;
-
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot check EMI status\n");
- return retval;
- }
- *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
- return retval;
-}
-
-static int hpc_toggle_emi(struct slot *slot)
-{
- u16 slot_cmd;
- u16 cmd_mask;
- int rc;
-
- slot_cmd = PCI_EXP_SLTCTL_EIC;
- cmd_mask = PCI_EXP_SLTCTL_EIC;
- rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
- slot->last_emi_toggle = get_seconds();
-
- return rc;
-}
-
static int hpc_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
@@ -874,8 +845,6 @@ static struct hpc_ops pciehp_hpc_ops = {
.get_attention_status = hpc_get_attention_status,
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
- .get_emi_status = hpc_get_emi_status,
- .toggle_emi = hpc_toggle_emi,
.get_max_bus_speed = hpc_get_max_lnk_speed,
.get_cur_bus_speed = hpc_get_cur_lnk_speed,
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf9e89f..5175d9b26f0b 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops skel_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a08fdc7..c159223389ec 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
}
struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2d6da78fddb6..a4494d78e7c2 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@ static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops sn_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149c2293..8a520a3d0f59 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cd389162735f..ebc9b8dca881 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -53,15 +53,35 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+#define MAX_AGAW_WIDTH 64
+
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
+#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
-#ifndef PHYSICAL_PAGE_MASK
-#define PHYSICAL_PAGE_MASK PAGE_MASK
-#endif
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+ are never going to work. */
+static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
+{
+ return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+
+static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+{
+ return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+ return mm_to_dma_pfn(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+ return page_to_dma_pfn(virt_to_page(p));
+}
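These new helpers convert between MM page frame numbers (PAGE_SHIFT) and VT-d page frame numbers (VTD_PAGE_SHIFT); they are only meaningful while VT-d pages are no larger than MM pages, as the comment above asserts. A minimal standalone sketch of the arithmetic, with the shift values assumed purely for illustration (4KiB VT-d pages, 16KiB MM pages):

#include <stdio.h>

#define VTD_PAGE_SHIFT 12          /* VT-d always uses 4KiB pages */
#define PAGE_SHIFT     14          /* assumed 16KiB MM pages for the demo */

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	/* MM pfn 3 covers VT-d pfns 12..15 when MM pages are 4x larger */
	printf("mm pfn 3 -> dma pfn %lu\n", mm_to_dma_pfn(3));   /* 12 */
	printf("dma pfn 15 -> mm pfn %lu\n", dma_to_mm_pfn(15)); /* 3  */
	return 0;
}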
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
@@ -131,8 +151,6 @@ static inline void context_set_fault_enable(struct context_entry *context)
context->lo &= (((u64)-1) << 2) | 1;
}
-#define CONTEXT_TT_MULTI_LEVEL 0
-
static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{
@@ -204,12 +222,17 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
- return (pte->val & VTD_PAGE_MASK);
+#ifdef CONFIG_64BIT
+ return pte->val & VTD_PAGE_MASK;
+#else
+ /* Must have a full atomic 64-bit read */
+ return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
}
-static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
- pte->val |= (addr & VTD_PAGE_MASK);
+ pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}
static inline bool dma_pte_present(struct dma_pte *pte)
@@ -217,6 +240,19 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+ return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
+/*
+ * This domain is a static identity-mapping domain.
+ * 1. It creates a static 1:1 mapping of all usable memory.
+ * 2. It maps to each iommu if successful.
+ * 3. Each iommu maps to this domain if successful.
+ */
+struct dmar_domain *si_domain;
+
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -225,6 +261,9 @@ static inline bool dma_pte_present(struct dma_pte *pte)
*/
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
+
struct dmar_domain {
int id; /* domain id */
unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
@@ -233,7 +272,6 @@ struct dmar_domain {
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
- spinlock_t mapping_lock; /* page table lock */
int gaw; /* max guest address width */
/* adjusted guest address width, 0 is level 2 30-bit */
@@ -256,6 +294,7 @@ struct device_domain_info {
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
@@ -401,17 +440,13 @@ void free_iova_mem(struct iova *iova)
static inline int width_to_agaw(int width);
-/* calculate agaw for each iommu.
- * "SAGAW" may be different across iommus, use a default agaw, and
- * get a supported less agaw for iommus that don't support the default agaw.
- */
-int iommu_calculate_agaw(struct intel_iommu *iommu)
+static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw = -1;
sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ for (agaw = width_to_agaw(max_gaw);
agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
@@ -420,12 +455,32 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return agaw;
}
-/* in native case, each domain is related to only one iommu */
+/*
+ * Calculate max SAGAW for each iommu.
+ */
+int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
+}
+
+/*
+ * Calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus; use a default agaw, and
+ * fall back to a smaller supported agaw for iommus that don't support it.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+}
+
+/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
int iommu_id;
+ /* si_domain and vm domain should not get here. */
BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
+ BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
@@ -620,80 +675,78 @@ static inline int width_to_agaw(int width)
static inline unsigned int level_to_offset_bits(int level)
{
- return (12 + (level - 1) * LEVEL_STRIDE);
+ return (level - 1) * LEVEL_STRIDE;
}
-static inline int address_level_offset(u64 addr, int level)
+static inline int pfn_level_offset(unsigned long pfn, int level)
{
- return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline u64 level_mask(int level)
+static inline unsigned long level_mask(int level)
{
- return ((u64)-1 << level_to_offset_bits(level));
+ return -1UL << level_to_offset_bits(level);
}
-static inline u64 level_size(int level)
+static inline unsigned long level_size(int level)
{
- return ((u64)1 << level_to_offset_bits(level));
+ return 1UL << level_to_offset_bits(level);
}
-static inline u64 align_to_level(u64 addr, int level)
+static inline unsigned long align_to_level(unsigned long pfn, int level)
{
- return ((addr + level_size(level) - 1) & level_mask(level));
+ return (pfn + level_size(level) - 1) & level_mask(level);
}
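level_to_offset_bits() now returns a shift relative to the VT-d pfn rather than the full address (the old version added 12), so pfn_level_offset(), level_mask(), level_size() and align_to_level() all operate on pfns. A standalone check of that arithmetic, assuming the usual LEVEL_STRIDE of 9 (512 entries per table page) and LEVEL_MASK of 0x1ff:

#include <stdio.h>

#define LEVEL_STRIDE 9              /* assumed: 512 entries per table page */
#define LEVEL_MASK   ((1UL << LEVEL_STRIDE) - 1)

static unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static unsigned long align_to_level(unsigned long pfn, int level)
{
	unsigned long mask = -1UL << level_to_offset_bits(level);
	unsigned long size = 1UL << level_to_offset_bits(level);

	return (pfn + size - 1) & mask;
}

int main(void)
{
	unsigned long pfn = 0x12345;

	/* index into the level-1 (4KiB) and level-2 (2MiB) tables */
	printf("level 1 offset: %#x\n", pfn_level_offset(pfn, 1)); /* 0x145 */
	printf("level 2 offset: %#x\n", pfn_level_offset(pfn, 2)); /* 0x91  */
	/* rounds up to the next 512-pfn boundary */
	printf("aligned to level 2: %#lx\n", align_to_level(pfn, 2)); /* 0x12400 */
	return 0;
}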
-static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
+static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+ unsigned long pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
int offset;
- unsigned long flags;
BUG_ON(!domain->pgd);
-
- addr &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
- spin_lock_irqsave(&domain->mapping_lock, flags);
while (level > 0) {
void *tmp_page;
- offset = address_level_offset(addr, level);
+ offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
if (level == 1)
break;
if (!dma_pte_present(pte)) {
+ uint64_t pteval;
+
tmp_page = alloc_pgtable_page();
- if (!tmp_page) {
- spin_unlock_irqrestore(&domain->mapping_lock,
- flags);
+ if (!tmp_page)
return NULL;
+
+ domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+ pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+ /* Someone else set it while we were thinking; use theirs. */
+ free_pgtable_page(tmp_page);
+ } else {
+ dma_pte_addr(pte);
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
- domain_flush_cache(domain, tmp_page, PAGE_SIZE);
- dma_set_pte_addr(pte, virt_to_phys(tmp_page));
- /*
- * high level table always sets r/w, last level page
- * table control read/write
- */
- dma_set_pte_readable(pte);
- dma_set_pte_writable(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
}
parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
- spin_unlock_irqrestore(&domain->mapping_lock, flags);
return pte;
}
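pfn_to_dma_pte() now fills in missing intermediate page tables without taking the old mapping_lock: it allocates a candidate table page, publishes it with cmpxchg64(), and simply frees the page if another CPU got there first. A minimal userspace sketch of that publish-or-discard pattern, using a GCC __sync builtin in place of the kernel's cmpxchg64(); the helper names and the 0x3 permission bits are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t pte_val;                 /* the slot being populated */

static void *alloc_pgtable_page(void) { return calloc(1, 4096); }
static void free_pgtable_page(void *p) { free(p); }

static void populate(void)
{
	void *tmp_page = alloc_pgtable_page();
	uint64_t pteval;

	if (!tmp_page)
		return;
	pteval = (uint64_t)(uintptr_t)tmp_page | 0x3;   /* "R/W" bits for the demo */

	/* Publish only if the slot is still empty; otherwise someone else
	 * raced us and their table wins, so discard ours. */
	if (!__sync_bool_compare_and_swap(&pte_val, 0ULL, pteval))
		free_pgtable_page(tmp_page);
}

int main(void)
{
	populate();
	populate();   /* the second call loses the "race" and frees its page */
	printf("pte_val = %#llx\n", (unsigned long long)pte_val);
	return 0;
}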
/* return address's pte at specific level */
-static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
- int level)
+static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
+ unsigned long pfn,
+ int level)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
@@ -701,7 +754,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
parent = domain->pgd;
while (level <= total) {
- offset = address_level_offset(addr, total);
+ offset = pfn_level_offset(pfn, total);
pte = &parent[offset];
if (level == total)
return pte;
@@ -714,74 +767,82 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
return NULL;
}
-/* clear one page's page table */
-static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
-{
- struct dma_pte *pte = NULL;
-
- /* get last level pte */
- pte = dma_addr_level_pte(domain, addr, 1);
-
- if (pte) {
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- }
-}
-
/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
+static void dma_pte_clear_range(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
- int npages;
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct dma_pte *first_pte, *pte;
+
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- start &= (((u64)1) << addr_width) - 1;
- end &= (((u64)1) << addr_width) - 1;
- /* in case it's partial page */
- start &= PAGE_MASK;
- end = PAGE_ALIGN(end);
- npages = (end - start) / VTD_PAGE_SIZE;
+ /* we don't need lock here; nobody else touches the iova range */
+ while (start_pfn <= last_pfn) {
+ first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
+ if (!pte) {
+ start_pfn = align_to_level(start_pfn + 1, 2);
+ continue;
+ }
+ do {
+ dma_clear_pte(pte);
+ start_pfn++;
+ pte++;
+ } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
- /* we don't need lock here, nobody else touches the iova range */
- while (npages--) {
- dma_pte_clear_one(domain, start);
- start += VTD_PAGE_SIZE;
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
}
}
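The rewritten dma_pte_clear_range() clears runs of consecutive PTEs and issues one domain_flush_cache() per run, ending a run when the PTE pointer crosses a table-page boundary (first_pte_in_page()). A small standalone sketch of how a pfn range breaks into such per-page runs, assuming 512 PTEs per 4KiB table page:

#include <stdio.h>

#define ENTRIES_PER_PAGE 512   /* assumed: 4KiB table page / 8-byte PTE */

int main(void)
{
	unsigned long start_pfn = 1000, last_pfn = 1600;

	while (start_pfn <= last_pfn) {
		unsigned long end_of_page = start_pfn | (ENTRIES_PER_PAGE - 1);

		if (end_of_page > last_pfn)
			end_of_page = last_pfn;

		/* one batched cache flush per run in the real code */
		printf("clear pfns %lu..%lu, one flush\n", start_pfn, end_of_page);
		start_pfn = end_of_page + 1;
	}
	return 0;
}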
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
- u64 start, u64 end)
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
- struct dma_pte *pte;
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct dma_pte *first_pte, *pte;
int total = agaw_to_level(domain->agaw);
int level;
- u64 tmp;
+ unsigned long tmp;
- start &= (((u64)1) << addr_width) - 1;
- end &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- /* we don't need lock here, nobody else touches the iova range */
+ /* We don't need lock here; nobody else touches the iova range */
level = 2;
while (level <= total) {
- tmp = align_to_level(start, level);
- if (tmp >= end || (tmp + level_size(level) > end))
+ tmp = align_to_level(start_pfn, level);
+
+ /* If we can't even clear one PTE at this level, we're done */
+ if (tmp + level_size(level) - 1 > last_pfn)
return;
- while (tmp < end) {
- pte = dma_addr_level_pte(domain, tmp, level);
- if (pte) {
- free_pgtable_page(
- phys_to_virt(dma_pte_addr(pte)));
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
+ while (tmp + level_size(level) - 1 <= last_pfn) {
+ first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+ if (!pte) {
+ tmp = align_to_level(tmp + 1, level + 1);
+ continue;
}
- tmp += level_size(level);
+ do {
+ if (dma_pte_present(pte)) {
+ free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+ dma_clear_pte(pte);
+ }
+ pte++;
+ tmp += level_size(level);
+ } while (!first_pte_in_page(pte) &&
+ tmp + level_size(level) - 1 <= last_pfn);
+
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+
}
level++;
}
/* free pgd */
- if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
+ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
free_pgtable_page(domain->pgd);
domain->pgd = NULL;
}
@@ -809,7 +870,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
void *addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flag;
addr = iommu->root_entry;
@@ -817,12 +878,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
- cmd = iommu->gcmd | DMA_GCMD_SRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
+ readl, (sts & DMA_GSTS_RTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
@@ -834,39 +894,25 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
- val = iommu->gcmd | DMA_GCMD_WBF;
spin_lock_irqsave(&iommu->register_lock, flag);
- writel(val, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
+ readl, (!(val & DMA_GSTS_WBFS)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask,
+ u64 type)
{
u64 val = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;
@@ -891,33 +937,16 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- /* flush context entry will implicitly flush write buffer */
- return 0;
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need set IVA_REG */
@@ -965,37 +994,106 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
- /* flush iotlb entry will implicitly flush write buffer */
- return 0;
}
-static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int pages, int non_present_entry_flush)
+static struct device_domain_info *iommu_support_dev_iotlb(
+ struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
- unsigned int mask;
+ int found = 0;
+ unsigned long flags;
+ struct device_domain_info *info;
+ struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
- BUG_ON(addr & (~VTD_PAGE_MASK));
- BUG_ON(pages == 0);
+ if (!ecap_dev_iotlb_support(iommu->ecap))
+ return NULL;
- /* Fallback to domain selective flush if no PSI support */
- if (!cap_pgsel_inv(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
+ if (!iommu->qi)
+ return NULL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->bus == bus && info->devfn == devfn) {
+ found = 1;
+ break;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (!found || !info->dev)
+ return NULL;
+
+ if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
+ return NULL;
+
+ if (!dmar_find_matched_atsr_unit(info->dev))
+ return NULL;
+
+ info->iommu = iommu;
+
+ return info;
+}
+
+static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info)
+ return;
+
+ pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+}
+
+static void iommu_disable_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ return;
+
+ pci_disable_ats(info->dev);
+}
+
+static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+ u64 addr, unsigned mask)
+{
+ u16 sid, qdep;
+ unsigned long flags;
+ struct device_domain_info *info;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ continue;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = pci_ats_queue_depth(info->dev);
+ qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ unsigned long pfn, unsigned int pages)
+{
+ unsigned int mask = ilog2(__roundup_pow_of_two(pages));
+ uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+
+ BUG_ON(pages == 0);
/*
+ * Fall back to a domain-selective flush if there is no PSI support or
+ * the size is too big.
* PSI requires page size to be 2 ^ x, and the base address is naturally
* aligned to the size
*/
- mask = ilog2(__roundup_pow_of_two(pages));
- /* Fallback to domain selective flush if size is too big */
- if (mask > cap_max_amask_val(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH, non_present_entry_flush);
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH);
- return iommu->flush.flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH,
- non_present_entry_flush);
+ /*
+ * In caching mode, domain ID 0 is reserved for non-present to present
+ * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+ */
+ if (!cap_caching_mode(iommu->cap) || did)
+ iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
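Page-selective invalidation needs a power-of-two region naturally aligned to its size, so the flush derives mask = ilog2(__roundup_pow_of_two(pages)) and falls back to a domain-selective flush when PSI is unsupported or the mask exceeds cap_max_amask_val(). A standalone version of that mask computation; the hardware limit of 6 is an assumption for the demo:

#include <stdio.h>

/* Equivalent of ilog2(__roundup_pow_of_two(pages)) for pages > 0 */
static unsigned int psi_mask(unsigned int pages)
{
	unsigned int mask = 0;

	while ((1U << mask) < pages)
		mask++;
	return mask;
}

int main(void)
{
	unsigned int max_amask = 6;   /* assumed cap_max_amask_val() */
	unsigned int pages[] = { 1, 2, 3, 8, 100 };

	for (int i = 0; i < 5; i++) {
		unsigned int mask = psi_mask(pages[i]);

		printf("%3u pages -> mask %u (%s)\n", pages[i], mask,
		       mask > max_amask ? "fallback to DSI flush" : "PSI flush");
	}
	return 0;
}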
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1021,13 +1119,13 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
unsigned long flags;
spin_lock_irqsave(&iommu->register_lock, flags);
- writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
+ iommu->gcmd |= DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
+ readl, (sts & DMA_GSTS_TES), sts);
- iommu->gcmd |= DMA_GCMD_TE;
spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -1043,7 +1141,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
+ readl, (!(sts & DMA_GSTS_TES)), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0;
@@ -1142,48 +1240,71 @@ void free_dmar_iommu(struct intel_iommu *iommu)
free_context_table(iommu);
}
-static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
+static struct dmar_domain *alloc_domain(void)
{
- unsigned long num;
- unsigned long ndomains;
struct dmar_domain *domain;
- unsigned long flags;
domain = alloc_domain_mem();
if (!domain)
return NULL;
+ memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+ domain->flags = 0;
+
+ return domain;
+}
+
+static int iommu_attach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
+{
+ int num;
+ unsigned long ndomains;
+ unsigned long flags;
+
ndomains = cap_ndoms(iommu->cap);
spin_lock_irqsave(&iommu->lock, flags);
+
num = find_first_zero_bit(iommu->domain_ids, ndomains);
if (num >= ndomains) {
spin_unlock_irqrestore(&iommu->lock, flags);
- free_domain_mem(domain);
printk(KERN_ERR "IOMMU: no free domain ids\n");
- return NULL;
+ return -ENOMEM;
}
- set_bit(num, iommu->domain_ids);
domain->id = num;
- memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+ set_bit(num, iommu->domain_ids);
set_bit(iommu->seq_id, &domain->iommu_bmp);
- domain->flags = 0;
iommu->domains[num] = domain;
spin_unlock_irqrestore(&iommu->lock, flags);
- return domain;
+ return 0;
}
-static void iommu_free_domain(struct dmar_domain *domain)
+static void iommu_detach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
{
unsigned long flags;
- struct intel_iommu *iommu;
-
- iommu = domain_get_iommu(domain);
+ int num, ndomains;
+ int found = 0;
spin_lock_irqsave(&iommu->lock, flags);
- clear_bit(domain->id, iommu->domain_ids);
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_bit(iommu->domain_ids, ndomains);
+ for (; num < ndomains; ) {
+ if (iommu->domains[num] == domain) {
+ found = 1;
+ break;
+ }
+ num = find_next_bit(iommu->domain_ids,
+ cap_ndoms(iommu->cap), num+1);
+ }
+
+ if (found) {
+ clear_bit(num, iommu->domain_ids);
+ clear_bit(iommu->seq_id, &domain->iommu_bmp);
+ iommu->domains[num] = NULL;
+ }
spin_unlock_irqrestore(&iommu->lock, flags);
}
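Domain IDs are now handed out by iommu_attach_domain(), which finds the first clear bit in the per-IOMMU domain_ids bitmap, while iommu_detach_domain() looks the domain up and clears that bit again. A toy standalone model of the allocator using a single 64-bit word; the real code uses the kernel bitmap API and cap_ndoms() for the limit:

#include <stdio.h>
#include <stdint.h>

#define NDOMAINS 64                 /* assumed cap_ndoms() for the demo */
static uint64_t domain_ids;         /* bit set => domain id in use */

static int attach_domain(void)
{
	for (int num = 0; num < NDOMAINS; num++) {
		if (!(domain_ids & (1ULL << num))) {
			domain_ids |= 1ULL << num;
			return num;          /* becomes domain->id */
		}
	}
	return -1;                           /* no free domain ids */
}

static void detach_domain(int num)
{
	domain_ids &= ~(1ULL << num);
}

int main(void)
{
	int a = attach_domain();   /* 0 */
	int b = attach_domain();   /* 1 */

	detach_domain(a);
	printf("reused id: %d\n", attach_domain());   /* 0 again */
	(void)b;
	return 0;
}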
@@ -1196,7 +1317,6 @@ static void dmar_init_reserved_ranges(void)
struct pci_dev *pdev = NULL;
struct iova *iova;
int i;
- u64 addr, size;
init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
@@ -1219,12 +1339,9 @@ static void dmar_init_reserved_ranges(void)
r = &pdev->resource[i];
if (!r->flags || !(r->flags & IORESOURCE_MEM))
continue;
- addr = r->start;
- addr &= PHYSICAL_PAGE_MASK;
- size = r->end - addr;
- size = PAGE_ALIGN(size);
- iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
- IOVA_PFN(size + addr) - 1);
+ iova = reserve_iova(&reserved_iova_list,
+ IOVA_PFN(r->start),
+ IOVA_PFN(r->end));
if (!iova)
printk(KERN_ERR "Reserve iova failed\n");
}
@@ -1258,7 +1375,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
unsigned long sagaw;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->mapping_lock);
spin_lock_init(&domain->iommu_lock);
domain_reserve_special_ranges(domain);
@@ -1303,7 +1419,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
- u64 end;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@@ -1312,21 +1429,22 @@ static void domain_exit(struct dmar_domain *domain)
domain_remove_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
- end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~PAGE_MASK);
/* clear ptes */
- dma_pte_clear_range(domain, 0, end);
+ dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
/* free page tables */
- dma_pte_free_pagetable(domain, 0, end);
+ dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+
+ for_each_active_iommu(iommu, drhd)
+ if (test_bit(iommu->seq_id, &domain->iommu_bmp))
+ iommu_detach_domain(domain, iommu);
- iommu_free_domain(domain);
free_domain_mem(domain);
}
-static int domain_context_mapping_one(struct dmar_domain *domain,
- int segment, u8 bus, u8 devfn)
+static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
+ u8 bus, u8 devfn, int translation)
{
struct context_entry *context;
unsigned long flags;
@@ -1336,10 +1454,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
unsigned long ndomains;
int id;
int agaw;
+ struct device_domain_info *info = NULL;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
BUG_ON(!domain->pgd);
+ BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
+ translation != CONTEXT_TT_MULTI_LEVEL);
iommu = device_to_iommu(segment, bus, devfn);
if (!iommu)
@@ -1357,7 +1479,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
id = domain->id;
pgd = domain->pgd;
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
int found = 0;
/* find an available domain id for this device in iommu */
@@ -1382,6 +1505,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
set_bit(num, iommu->domain_ids);
+ set_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = domain;
id = num;
}
@@ -1399,21 +1523,44 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
context_set_domain_id(context, id);
- context_set_address_width(context, iommu->agaw);
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+ translation = info ? CONTEXT_TT_DEV_IOTLB :
+ CONTEXT_TT_MULTI_LEVEL;
+ }
+ /*
+ * In pass-through mode, AW must be programmed to indicate the largest
+ * AGAW value supported by hardware; ASR is ignored by hardware.
+ */
+ if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
+ context_set_address_width(context, iommu->msagaw);
+ else {
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, iommu->agaw);
+ }
+
+ context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));
- /* it's a non-present to present mapping */
- if (iommu->flush.flush_context(iommu, domain->id,
- (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL, 1))
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ } else {
iommu_flush_write_buffer(iommu);
- else
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
-
+ }
+ iommu_enable_dev_iotlb(info);
spin_unlock_irqrestore(&iommu->lock, flags);
spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1426,13 +1573,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
+domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
+ int translation)
{
int ret;
struct pci_dev *tmp, *parent;
ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
- pdev->bus->number, pdev->devfn);
+ pdev->bus->number, pdev->devfn,
+ translation);
if (ret)
return ret;
@@ -1446,7 +1595,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
ret = domain_context_mapping_one(domain,
pci_domain_nr(parent->bus),
parent->bus->number,
- parent->devfn);
+ parent->devfn, translation);
if (ret)
return ret;
parent = parent->bus->self;
@@ -1454,12 +1603,14 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
- tmp->subordinate->number, 0);
+ tmp->subordinate->number, 0,
+ translation);
else /* this is a legacy PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->bus),
tmp->bus->number,
- tmp->devfn);
+ tmp->devfn,
+ translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
@@ -1497,42 +1648,86 @@ static int domain_context_mapped(struct pci_dev *pdev)
tmp->devfn);
}
-static int
-domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
- u64 hpa, size_t size, int prot)
+static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
{
- u64 start_pfn, end_pfn;
- struct dma_pte *pte;
- int index;
- int addr_width = agaw_to_width(domain->agaw);
+ struct dma_pte *first_pte = NULL, *pte = NULL;
+ phys_addr_t uninitialized_var(pteval);
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ unsigned long sg_res;
- hpa &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
- iova &= PAGE_MASK;
- start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
- end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
- index = 0;
- while (start_pfn < end_pfn) {
- pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
- if (!pte)
- return -ENOMEM;
+
+ prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+
+ if (sg)
+ sg_res = 0;
+ else {
+ sg_res = nr_pages + 1;
+ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+ }
+
+ while (nr_pages--) {
+ uint64_t tmp;
+
+ if (!sg_res) {
+ sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+ sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+ sg->dma_length = sg->length;
+ pteval = page_to_phys(sg_page(sg)) | prot;
+ }
+ if (!pte) {
+ first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+ if (!pte)
+ return -ENOMEM;
+ }
/* We don't need lock here, nobody else
* touches the iova range
*/
- BUG_ON(dma_pte_addr(pte));
- dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
- dma_set_pte_prot(pte, prot);
- if (prot & DMA_PTE_SNP)
- dma_set_pte_snp(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- start_pfn++;
- index++;
+ tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
+ if (tmp) {
+ static int dumps = 5;
+ printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+ iov_pfn, tmp, (unsigned long long)pteval);
+ if (dumps) {
+ dumps--;
+ debug_dma_dump_mappings(NULL);
+ }
+ WARN_ON(1);
+ }
+ pte++;
+ if (!nr_pages || first_pte_in_page(pte)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+ pte = NULL;
+ }
+ iov_pfn++;
+ pteval += VTD_PAGE_SIZE;
+ sg_res--;
+ if (!sg_res)
+ sg = sg_next(sg);
}
return 0;
}
+static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long nr_pages,
+ int prot)
+{
+ return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+}
+
+static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages,
+ int prot)
+{
+ return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+}
+
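__domain_mapping() walks either a flat pfn range or a scatterlist; for the scatterlist case it converts each element into a count of VT-d pages (sg_res) and consumes them one PTE at a time. A standalone sketch of that bookkeeping with made-up scatterlist entries:

#include <stdio.h>

#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_SIZE  (1UL << VTD_PAGE_SHIFT)

struct sg { unsigned long offset, length; };

int main(void)
{
	struct sg sgl[] = { { 0x800, 0x1000 }, { 0, 0x3000 } };
	unsigned long iov_pfn = 0x100;

	for (int i = 0; i < 2; i++) {
		/* each element contributes ceil((offset + length) / VTD_PAGE_SIZE) pages */
		unsigned long sg_res = (sgl[i].offset + sgl[i].length +
					VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;

		printf("sg %d -> %lu PTEs starting at vPFN %#lx\n",
		       i, sg_res, iov_pfn);
		iov_pfn += sg_res;
	}
	return 0;
}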
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu)
@@ -1540,9 +1735,8 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1561,6 +1755,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -1597,6 +1792,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
unsigned long flags;
int bus = 0, devfn = 0;
int segment;
+ int ret;
domain = find_domain(pdev);
if (domain)
@@ -1629,6 +1825,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
}
}
+ domain = alloc_domain();
+ if (!domain)
+ goto error;
+
/* Allocate new domain for the device */
drhd = dmar_find_matched_drhd_unit(pdev);
if (!drhd) {
@@ -1638,9 +1838,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
}
iommu = drhd->iommu;
- domain = iommu_alloc_domain(iommu);
- if (!domain)
+ ret = iommu_attach_domain(domain, iommu);
+ if (ret) {
+ domain_exit(domain);
goto error;
+ }
if (domain_init(domain, gaw)) {
domain_exit(domain);
@@ -1714,55 +1916,63 @@ error:
return find_domain(pdev);
}
+static int iommu_identity_mapping;
+
+static int iommu_domain_identity_map(struct dmar_domain *domain,
+ unsigned long long start,
+ unsigned long long end)
+{
+ unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
+ unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
+
+ if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
+ dma_to_mm_pfn(last_vpfn))) {
+ printk(KERN_ERR "IOMMU: reserve iova failed\n");
+ return -ENOMEM;
+ }
+
+ pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
+ start, end, domain->id);
+ /*
+ * RMRR range might have overlap with physical memory range,
+ * clear it first
+ */
+ dma_pte_clear_range(domain, first_vpfn, last_vpfn);
+
+ return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
+ last_vpfn - first_vpfn + 1,
+ DMA_PTE_READ|DMA_PTE_WRITE);
+}
+
static int iommu_prepare_identity_map(struct pci_dev *pdev,
unsigned long long start,
unsigned long long end)
{
struct dmar_domain *domain;
- unsigned long size;
- unsigned long long base;
int ret;
printk(KERN_INFO
- "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
- /* page table init */
+ "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+
domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
- /* The address might not be aligned */
- base = start & PAGE_MASK;
- size = end - base;
- size = PAGE_ALIGN(size);
- if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
- IOVA_PFN(base + size) - 1)) {
- printk(KERN_ERR "IOMMU: reserve iova failed\n");
- ret = -ENOMEM;
+ ret = iommu_domain_identity_map(domain, start, end);
+ if (ret)
goto error;
- }
-
- pr_debug("Mapping reserved region %lx@%llx for %s\n",
- size, base, pci_name(pdev));
- /*
- * RMRR range might have overlap with physical memory range,
- * clear it first
- */
- dma_pte_clear_range(domain, base, base + size);
- ret = domain_page_mapping(domain, base, base, size,
- DMA_PTE_READ|DMA_PTE_WRITE);
+ /* context entry init */
+ ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
if (ret)
goto error;
- /* context entry init */
- ret = domain_context_mapping(domain, pdev);
- if (!ret)
- return 0;
-error:
+ return 0;
+
+ error:
domain_exit(domain);
return ret;
-
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
@@ -1774,96 +1984,222 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
rmrr->end_address + 1);
}
-#ifdef CONFIG_DMAR_GFX_WA
-struct iommu_prepare_data {
+#ifdef CONFIG_DMAR_FLOPPY_WA
+static inline void iommu_prepare_isa(void)
+{
struct pci_dev *pdev;
int ret;
-};
-static int __init iommu_prepare_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- struct iommu_prepare_data *data;
+ pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (!pdev)
+ return;
- data = (struct iommu_prepare_data *)datax;
+ printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
+ ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
- data->ret = iommu_prepare_identity_map(data->pdev,
- start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
- return data->ret;
+ if (ret)
+ printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
+ "floppy might not work\n");
}
-
-static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
+#else
+static inline void iommu_prepare_isa(void)
{
- int nid;
- struct iommu_prepare_data data;
-
- data.pdev = pdev;
- data.ret = 0;
-
- for_each_online_node(nid) {
- work_with_active_regions(nid, iommu_prepare_work_fn, &data);
- if (data.ret)
- return data.ret;
- }
- return data.ret;
+ return;
}
+#endif /* !CONFIG_DMAR_FLOPPY_WA */
-static void __init iommu_prepare_gfx_mapping(void)
+/* Initialize each context entry as pass through.*/
+static int __init init_context_pass_through(void)
{
struct pci_dev *pdev = NULL;
+ struct dmar_domain *domain;
int ret;
for_each_pci_dev(pdev) {
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
- !IS_GFX_DEVICE(pdev))
- continue;
- printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
- pci_name(pdev));
- ret = iommu_prepare_with_active_regions(pdev);
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_PASS_THROUGH);
if (ret)
- printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
+ return ret;
}
+ return 0;
}
-#else /* !CONFIG_DMAR_GFX_WA */
-static inline void iommu_prepare_gfx_mapping(void)
+
+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
+static int __init si_domain_work_fn(unsigned long start_pfn,
+ unsigned long end_pfn, void *datax)
{
- return;
+ int *ret = datax;
+
+ *ret = iommu_domain_identity_map(si_domain,
+ (uint64_t)start_pfn << PAGE_SHIFT,
+ (uint64_t)end_pfn << PAGE_SHIFT);
+ return *ret;
+
}
-#endif
-#ifdef CONFIG_DMAR_FLOPPY_WA
-static inline void iommu_prepare_isa(void)
+static int si_domain_init(void)
{
- struct pci_dev *pdev;
- int ret;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ int nid, ret = 0;
- pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- if (!pdev)
- return;
+ si_domain = alloc_domain();
+ if (!si_domain)
+ return -EFAULT;
- printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
+ pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
- if (ret)
- printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
- "floppy might not work\n");
+ for_each_active_iommu(iommu, drhd) {
+ ret = iommu_attach_domain(si_domain, iommu);
+ if (ret) {
+ domain_exit(si_domain);
+ return -EFAULT;
+ }
+ }
+
+ if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ domain_exit(si_domain);
+ return -EFAULT;
+ }
+ si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+
+ for_each_online_node(nid) {
+ work_with_active_regions(nid, si_domain_work_fn, &ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
-#else
-static inline void iommu_prepare_isa(void)
+
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+ struct pci_dev *pdev);
+static int identity_mapping(struct pci_dev *pdev)
{
- return;
+ struct device_domain_info *info;
+
+ if (likely(!iommu_identity_mapping))
+ return 0;
+
+
+ list_for_each_entry(info, &si_domain->devices, link)
+ if (info->dev == pdev)
+ return 1;
+ return 0;
}
-#endif /* !CONFIG_DMAR_FLPY_WA */
-static int __init init_dmars(void)
+static int domain_add_dev_info(struct dmar_domain *domain,
+ struct pci_dev *pdev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ info = alloc_devinfo_mem();
+ if (!info)
+ return -ENOMEM;
+
+ info->segment = pci_domain_nr(pdev->bus);
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+ info->dev = pdev;
+ info->domain = domain;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_add(&info->link, &domain->devices);
+ list_add(&info->global, &device_domain_list);
+ pdev->dev.archdata.iommu = info;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+ if (iommu_identity_mapping == 2)
+ return IS_GFX_DEVICE(pdev);
+
+ /*
+ * We want to start off with all devices in the 1:1 domain, and
+ * take them out later if we find they can't access all of memory.
+ *
+ * However, we can't do this for PCI devices behind bridges,
+ * because all PCI devices behind the same bridge will end up
+ * with the same source-id on their transactions.
+ *
+ * Practically speaking, we can't change things around for these
+ * devices at run-time, because we can't be sure there'll be no
+ * DMA transactions in flight for any of their siblings.
+ *
+ * So PCI devices (unless they're on the root bus) as well as
+ * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+ * the 1:1 domain, just in _case_ one of their siblings turns out
+ * not to be able to map all of memory.
+ */
+ if (!pdev->is_pcie) {
+ if (!pci_is_root_bus(pdev->bus))
+ return 0;
+ if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+ return 0;
+ } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ /*
+ * At boot time, we don't yet know if devices will be 64-bit capable.
+ * Assume that they will -- if they turn out not to be, then we can
+ * take them out of the 1:1 domain later.
+ */
+ if (!startup)
+ return pdev->dma_mask > DMA_BIT_MASK(32);
+
+ return 1;
+}
+
+static int iommu_prepare_static_identity_mapping(void)
+{
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ ret = si_domain_init();
+ if (ret)
+ return -EFAULT;
+
+ for_each_pci_dev(pdev) {
+ if (iommu_should_identity_map(pdev, 1)) {
+ printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+ pci_name(pdev));
+
+ ret = domain_context_mapping(si_domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
+ if (ret)
+ return ret;
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
+ int pass_through = 1;
+
+ /*
+ * If pass through cannot be enabled, the iommu falls back to identity
+ * mapping.
+ */
+ if (iommu_pass_through)
+ iommu_identity_mapping = 1;
/*
* for each drhd
@@ -1917,7 +2253,15 @@ static int __init init_dmars(void)
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
goto error;
}
+ if (!ecap_pass_through(iommu->ecap))
+ pass_through = 0;
}
+ if (iommu_pass_through)
+ if (!pass_through) {
+ printk(KERN_INFO
+ "Pass Through is not supported by hardware.\n");
+ iommu_pass_through = 0;
+ }
/*
* Start from the sane iommu hardware state.
@@ -1973,35 +2317,62 @@ static int __init init_dmars(void)
}
/*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
+ * If pass through is set and enabled, the context entries of all PCI
+ * devices are initialized with the pass-through translation type.
*/
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /* some BIOS lists non-exist devices in DMAR table */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
+ if (iommu_pass_through) {
+ ret = init_context_pass_through();
+ if (ret) {
+ printk(KERN_ERR "IOMMU: Pass through init failed.\n");
+ iommu_pass_through = 0;
}
}
- iommu_prepare_gfx_mapping();
+ /*
+ * If pass through is not set or not enabled, set up context entries for
+ * identity mappings for rmrr, gfx, and isa, and possibly fall back to
+ * static identity mapping if iommu_identity_mapping is set.
+ */
+ if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+ if (!iommu_identity_mapping)
+ iommu_identity_mapping = 2;
+#endif
+ if (iommu_identity_mapping)
+ iommu_prepare_static_identity_mapping();
+ /*
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
+ */
+ printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /*
+ * some BIOSes list non-existent devices in the
+ * DMAR table.
+ */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
+ }
+ }
- iommu_prepare_isa();
+ iommu_prepare_isa();
+ }
/*
* for each drhd
@@ -2023,10 +2394,8 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- 0);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
@@ -2046,50 +2415,40 @@ error:
return ret;
}
-static inline u64 aligned_size(u64 host_addr, size_t size)
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+ size_t size)
{
- u64 addr;
- addr = (host_addr & (~PAGE_MASK)) + size;
- return PAGE_ALIGN(addr);
+ host_addr &= ~PAGE_MASK;
+ return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
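aligned_nrpages() reports how many VT-d pages a host buffer spans, rounded up to MM page granularity. A standalone check with both shifts assumed to be 12 (4KiB pages), so the MM and VT-d granularities coincide:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT 12

static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;                 /* keep only the page offset */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* A 100-byte buffer that straddles a page boundary needs 2 pages */
	printf("%lu\n", aligned_nrpages(0x1fc0, 100));  /* 2 */
	/* A page-aligned 8KiB buffer needs exactly 2 pages */
	printf("%lu\n", aligned_nrpages(0x2000, 8192)); /* 2 */
	return 0;
}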
-struct iova *
-iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
-{
- struct iova *piova;
-
- /* Make sure it's in range */
- end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
- if (!size || (IOVA_START_ADDR + size > end))
- return NULL;
-
- piova = alloc_iova(&domain->iovad,
- size >> PAGE_SHIFT, IOVA_PFN(end), 1);
- return piova;
-}
-
-static struct iova *
-__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
- size_t size, u64 dma_mask)
+/* This takes a number of _MM_ pages, not VTD pages */
+static struct iova *intel_alloc_iova(struct device *dev,
+ struct dmar_domain *domain,
+ unsigned long nrpages, uint64_t dma_mask)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct iova *iova = NULL;
- if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
- iova = iommu_alloc_iova(domain, size, dma_mask);
- else {
+ /* Restrict dma_mask to the width that the iommu can handle */
+ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+
+ if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
/*
* First try to allocate an io virtual address in
* DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
- iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
- if (!iova)
- iova = iommu_alloc_iova(domain, size, dma_mask);
- }
-
- if (!iova) {
- printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
+ iova = alloc_iova(&domain->iovad, nrpages,
+ IOVA_PFN(DMA_BIT_MASK(32)), 1);
+ if (iova)
+ return iova;
+ }
+ iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
+ if (unlikely(!iova)) {
+ printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+ nrpages, pci_name(pdev));
return NULL;
}
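intel_alloc_iova() first clamps the device's DMA mask to what the domain can address, then tries to allocate below 4GiB before falling back to the full range, unless dmar_forcedac is set or the mask already fits in 32 bits. A toy standalone model of that ordering; the allocator itself is faked, so the printed outcomes are purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Toy stand-in for alloc_iova(): pretend the 32-bit space is already full */
static int try_alloc(uint64_t limit)
{
	return limit > DMA_BIT_MASK(32);
}

static const char *alloc_demo(uint64_t domain_max, uint64_t dma_mask, int forcedac)
{
	/* Restrict dma_mask to the width the domain can handle */
	if (dma_mask > domain_max)
		dma_mask = domain_max;

	if (!forcedac && dma_mask > DMA_BIT_MASK(32)) {
		if (try_alloc(DMA_BIT_MASK(32)))
			return "allocated below 4GiB";
	}
	if (try_alloc(dma_mask))
		return "allocated from the full range";
	return "allocation failed";
}

int main(void)
{
	printf("%s\n", alloc_demo(DMA_BIT_MASK(48), DMA_BIT_MASK(64), 0));
	printf("%s\n", alloc_demo(DMA_BIT_MASK(48), DMA_BIT_MASK(32), 0));
	return 0;
}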
@@ -2112,7 +2471,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
/* make sure context mapping is ok */
if (unlikely(!domain_context_mapped(pdev))) {
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
if (ret) {
printk(KERN_ERR
"Domain context map for %s failed",
@@ -2124,6 +2484,63 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
return domain;
}
+static int iommu_dummy(struct pci_dev *pdev)
+{
+ return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
+/* Check if the pdev needs to go through non-identity map and unmap process.*/
+static int iommu_no_mapping(struct device *dev)
+{
+ struct pci_dev *pdev;
+ int found;
+
+ if (unlikely(dev->bus != &pci_bus_type))
+ return 1;
+
+ pdev = to_pci_dev(dev);
+ if (iommu_dummy(pdev))
+ return 1;
+
+ if (!iommu_identity_mapping)
+ return 0;
+
+ found = identity_mapping(pdev);
+ if (found) {
+ if (iommu_should_identity_map(pdev, 0))
+ return 1;
+ else {
+ /*
+ * A 32-bit DMA device is removed from si_domain and
+ * falls back to non-identity mapping.
+ */
+ domain_remove_one_dev_info(si_domain, pdev);
+ printk(KERN_INFO "32bit %s uses non-identity mapping\n",
+ pci_name(pdev));
+ return 0;
+ }
+ } else {
+ /*
+ * When a 64-bit DMA device is detached from a VM, the device
+ * is put back into si_domain for identity mapping.
+ */
+ if (iommu_should_identity_map(pdev, 0)) {
+ int ret;
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return 0;
+ ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+ if (!ret) {
+ printk(KERN_INFO "64bit %s uses identity mapping\n",
+ pci_name(pdev));
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
@@ -2136,7 +2553,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+
+ if (iommu_no_mapping(hwdev))
return paddr;
domain = get_valid_domain_for_dev(pdev);
@@ -2144,14 +2562,13 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
return 0;
iommu = domain_get_iommu(domain);
- size = aligned_size((u64)paddr, size);
+ size = aligned_nrpages(paddr, size);
- iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova)
goto error;
- start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
-
/*
* Check if DMAR supports zero-length reads on write only
* mappings..
@@ -2167,19 +2584,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
* might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
- ret = domain_page_mapping(domain, start_paddr,
- ((u64)paddr) & PHYSICAL_PAGE_MASK,
- size, prot);
+ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+ paddr >> VTD_PAGE_SHIFT, size, prot);
if (ret)
goto error;
- /* it's a non-present to present mapping */
- ret = iommu_flush_iotlb_psi(iommu, domain->id,
- start_paddr, size >> VTD_PAGE_SHIFT, 1);
- if (ret)
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
+ else
iommu_flush_write_buffer(iommu);
- return start_paddr + ((u64)paddr & (~PAGE_MASK));
+ start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+ start_paddr += paddr & ~PAGE_MASK;
+ return start_paddr;
error:
if (iova)
@@ -2210,15 +2628,22 @@ static void flush_unmaps(void)
if (!iommu)
continue;
- if (deferred_flush[i].next) {
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
- for (j = 0; j < deferred_flush[i].next; j++) {
- __free_iova(&deferred_flush[i].domain[j]->iovad,
- deferred_flush[i].iova[j]);
- }
- deferred_flush[i].next = 0;
+ if (!deferred_flush[i].next)
+ continue;
+
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH);
+ for (j = 0; j < deferred_flush[i].next; j++) {
+ unsigned long mask;
+ struct iova *iova = deferred_flush[i].iova[j];
+
+ mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
+ mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ iova->pfn_lo << PAGE_SHIFT, mask);
+ __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
+ deferred_flush[i].next = 0;
}
list_size = 0;
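For the device-IOTLB invalidation in the hunk above, the mask ends up as the log2 of the IOVA size in VT-d pages; a small worked example (standalone sketch, assuming 4 KiB pages so PAGE_SHIFT == VTD_PAGE_SHIFT == 12, with ilog2_ul() standing in for the kernel's ilog2()):

#include <assert.h>

static unsigned long ilog2_ul(unsigned long v)
{
	unsigned long r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const int page_shift = 12, vtd_page_shift = 12;
	unsigned long pfn_lo = 0x100, pfn_hi = 0x107;	/* hypothetical 8-page IOVA */
	unsigned long mask;

	mask = (pfn_hi - pfn_lo + 1) << page_shift;	/* 8 * 4096 bytes */
	mask = ilog2_ul(mask >> vtd_page_shift);	/* log2(8 pages) */
	assert(mask == 3);	/* i.e. an order-3 (8-page) invalidation */
	return 0;
}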
@@ -2265,35 +2690,38 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
{
struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
- unsigned long start_addr;
+ unsigned long start_pfn, last_pfn;
struct iova *iova;
struct intel_iommu *iommu;
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(dev))
return;
+
domain = find_domain(pdev);
BUG_ON(!domain);
iommu = domain_get_iommu(domain);
iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (!iova)
+ if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
+ (unsigned long long)dev_addr))
return;
- start_addr = iova->pfn_lo << PAGE_SHIFT;
- size = aligned_size((u64)dev_addr, size);
+ start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+ last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
- pr_debug("Device %s unmapping: %zx@%llx\n",
- pci_name(pdev), size, (unsigned long long)start_addr);
+ pr_debug("Device %s unmapping: pfn %lx-%lx\n",
+ pci_name(pdev), start_pfn, last_pfn);
/* clear the whole page */
- dma_pte_clear_range(domain, start_addr, start_addr + size);
+ dma_pte_clear_range(domain, start_pfn, last_pfn);
+
/* free page tables */
- dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+ dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+
if (intel_iommu_strict) {
- if (iommu_flush_iotlb_psi(iommu,
- domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ last_pfn - start_pfn + 1);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2351,17 +2779,13 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
- unsigned long start_addr;
+ unsigned long start_pfn, last_pfn;
struct iova *iova;
- size_t size = 0;
- phys_addr_t addr;
- struct scatterlist *sg;
struct intel_iommu *iommu;
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(hwdev))
return;
domain = find_domain(pdev);
@@ -2370,23 +2794,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
iommu = domain_get_iommu(domain);
iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
- if (!iova)
+ if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
+ (unsigned long long)sglist[0].dma_address))
return;
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size += aligned_size((u64)addr, sg->length);
- }
- start_addr = iova->pfn_lo << PAGE_SHIFT;
+ start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+ last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
/* clear the whole page */
- dma_pte_clear_range(domain, start_addr, start_addr + size);
+ dma_pte_clear_range(domain, start_pfn, last_pfn);
+
/* free page tables */
- dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+ dma_pte_free_pagetable(domain, start_pfn, last_pfn);
- if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ (last_pfn - start_pfn + 1));
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2409,21 +2831,20 @@ static int intel_nontranslate_map_sg(struct device *hddev,
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
- phys_addr_t addr;
int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- size_t offset = 0;
+ size_t offset_pfn = 0;
struct iova *iova = NULL;
int ret;
struct scatterlist *sg;
- unsigned long start_addr;
+ unsigned long start_vpfn;
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(hwdev))
return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
@@ -2432,12 +2853,11 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
iommu = domain_get_iommu(domain);
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size += aligned_size((u64)addr, sg->length);
- }
+ for_each_sg(sglist, sg, nelems, i)
+ size += aligned_nrpages(sg->offset, sg->length);
- iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova) {
sglist->dma_length = 0;
return 0;
@@ -2453,35 +2873,27 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
- start_addr = iova->pfn_lo << PAGE_SHIFT;
- offset = 0;
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size = aligned_size((u64)addr, sg->length);
- ret = domain_page_mapping(domain, start_addr + offset,
- ((u64)addr) & PHYSICAL_PAGE_MASK,
- size, prot);
- if (ret) {
- /* clear the page */
- dma_pte_clear_range(domain, start_addr,
- start_addr + offset);
- /* free page tables */
- dma_pte_free_pagetable(domain, start_addr,
- start_addr + offset);
- /* free iova */
- __free_iova(&domain->iovad, iova);
- return 0;
- }
- sg->dma_address = start_addr + offset +
- ((u64)addr & (~PAGE_MASK));
- sg->dma_length = sg->length;
- offset += size;
+ start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+
+ ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+ if (unlikely(ret)) {
+ /* clear the page */
+ dma_pte_clear_range(domain, start_vpfn,
+ start_vpfn + size - 1);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_vpfn,
+ start_vpfn + size - 1);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+ return 0;
}
- /* it's a non-present to present mapping */
- if (iommu_flush_iotlb_psi(iommu, domain->id,
- start_addr, offset >> VTD_PAGE_SHIFT, 1))
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
+ else
iommu_flush_write_buffer(iommu);
+
return nelems;
}
@@ -2640,9 +3052,9 @@ static int init_iommu_hw(void)
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
}
@@ -2657,9 +3069,9 @@ static void iommu_flush_all(void)
for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
}
}
@@ -2782,7 +3194,7 @@ int __init intel_iommu_init(void)
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
- if (no_iommu || swiotlb || dmar_disabled)
+ if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
@@ -2802,35 +3214,18 @@ int __init intel_iommu_init(void)
init_timer(&unmap_timer);
force_iommu = 1;
- dma_ops = &intel_dma_ops;
- init_iommu_sysfs();
- register_iommu(&intel_iommu_ops);
-
- return 0;
-}
+ if (!iommu_pass_through) {
+ printk(KERN_INFO
+ "Multi-level page-table translation for DMAR.\n");
+ dma_ops = &intel_dma_ops;
+ } else
+ printk(KERN_INFO
+ "DMAR: Pass through translation for DMAR.\n");
-static int vm_domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- info = alloc_devinfo_mem();
- if (!info)
- return -ENOMEM;
-
- info->segment = pci_domain_nr(pdev->bus);
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->dev = pdev;
- info->domain = domain;
+ init_iommu_sysfs();
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- pdev->dev.archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ register_iommu(&intel_iommu_ops);
return 0;
}
@@ -2862,7 +3257,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
}
}
-static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev)
{
struct device_domain_info *info;
@@ -2888,6 +3283,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, pdev);
free_devinfo_mem(info);
@@ -2938,6 +3334,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags1);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, info->dev);
@@ -2993,12 +3390,11 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
return domain;
}
-static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->mapping_lock);
spin_lock_init(&domain->iommu_lock);
domain_reserve_special_ranges(domain);
@@ -3052,8 +3448,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
static void vm_domain_exit(struct dmar_domain *domain)
{
- u64 end;
-
/* Domain 0 is reserved, so dont process it */
if (!domain)
return;
@@ -3061,14 +3455,12 @@ static void vm_domain_exit(struct dmar_domain *domain)
vm_domain_remove_all_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
- end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~VTD_PAGE_MASK);
/* clear ptes */
- dma_pte_clear_range(domain, 0, end);
+ dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
/* free page tables */
- dma_pte_free_pagetable(domain, 0, end);
+ dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
iommu_free_vm_domain(domain);
free_domain_mem(domain);
@@ -3084,7 +3476,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
"intel_iommu_domain_init: dmar_domain == NULL\n");
return -ENOMEM;
}
- if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
vm_domain_exit(dmar_domain);
@@ -3119,8 +3511,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
old_domain = find_domain(pdev);
if (old_domain) {
- if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_remove_one_dev_info(old_domain, pdev);
+ if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
+ domain_remove_one_dev_info(old_domain, pdev);
else
domain_remove_dev_info(old_domain);
}
@@ -3142,11 +3535,11 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EFAULT;
}
- ret = domain_context_mapping(dmar_domain, pdev);
+ ret = domain_add_dev_info(dmar_domain, pdev);
if (ret)
return ret;
- ret = vm_domain_add_dev_info(dmar_domain, pdev);
+ ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
return ret;
}
@@ -3156,7 +3549,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = domain->priv;
struct pci_dev *pdev = to_pci_dev(dev);
- vm_domain_remove_one_dev_info(dmar_domain, pdev);
+ domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
@@ -3176,7 +3569,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+ max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
int min_agaw;
u64 end;
@@ -3194,8 +3587,11 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
}
dmar_domain->max_addr = max_addr;
}
-
- ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
+ /* Round up size to next multiple of PAGE_SIZE, if it and
+ the low bits of hpa would take us onto the next page */
+ size = aligned_nrpages(hpa, size);
+ ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
return ret;
}
@@ -3203,15 +3599,15 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- dma_addr_t base;
- /* The address might not be aligned */
- base = iova & VTD_PAGE_MASK;
- size = VTD_PAGE_ALIGN(size);
- dma_pte_clear_range(dmar_domain, base, base + size);
+ if (!size)
+ return;
+
+ dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ (iova + size - 1) >> VTD_PAGE_SHIFT);
- if (dmar_domain->max_addr == base + size)
- dmar_domain->max_addr = base;
+ if (dmar_domain->max_addr == iova + size)
+ dmar_domain->max_addr = iova;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3221,7 +3617,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
struct dma_pte *pte;
u64 phys = 0;
- pte = addr_to_dma_pte(dmar_domain, iova);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
if (pte)
phys = dma_pte_addr(pte);
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3a0cb0bb0593..4f5b8712931f 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -10,6 +10,8 @@
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
+#include <asm/pci-direct.h>
+#include "pci.h"
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified)
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
- set_64bit((unsigned long *)irte, irte_modified->low);
+ set_64bit((unsigned long *)&irte->low, irte_modified->low);
+ set_64bit((unsigned long *)&irte->high, irte_modified->high);
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
return drhd->iommu;
}
+static int clear_entries(struct irq_2_iommu *irq_iommu)
+{
+ struct irte *start, *entry, *end;
+ struct intel_iommu *iommu;
+ int index;
+
+ if (irq_iommu->sub_handle)
+ return 0;
+
+ iommu = irq_iommu->iommu;
+ index = irq_iommu->irte_index + irq_iommu->sub_handle;
+
+ start = iommu->ir_table->base + index;
+ end = start + (1 << irq_iommu->irte_mask);
+
+ for (entry = start; entry < end; entry++) {
+ set_64bit((unsigned long *)&entry->low, 0);
+ set_64bit((unsigned long *)&entry->high, 0);
+ }
+
+ return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+}
+
int free_irte(int irq)
{
int rc = 0;
- int index, i;
- struct irte *irte;
- struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
unsigned long flags;
@@ -385,16 +408,7 @@ int free_irte(int irq)
return -1;
}
- iommu = irq_iommu->iommu;
-
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
- irte = &iommu->ir_table->base[index];
-
- if (!irq_iommu->sub_handle) {
- for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
- set_64bit((unsigned long *)(irte + i), 0);
- rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
- }
+ rc = clear_entries(irq_iommu);
irq_iommu->iommu = NULL;
irq_iommu->irte_index = 0;
@@ -406,10 +420,95 @@ int free_irte(int irq)
return rc;
}
+/*
+ * source validation type
+ */
+#define SVT_NO_VERIFY 0x0 /* no verification is required */
+#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
+#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
+
+/*
+ * source-id qualifier
+ */
+#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
+#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
+ * the third least significant bit
+ */
+#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
+ * the second and third least significant bits
+ */
+#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
+ * the three least significant bits
+ */
+
+/*
+ * set SVT, SQ and SID fields of irte to verify
+ * source ids of interrupt requests
+ */
+static void set_irte_sid(struct irte *irte, unsigned int svt,
+ unsigned int sq, unsigned int sid)
+{
+ irte->svt = svt;
+ irte->sq = sq;
+ irte->sid = sid;
+}
+
+int set_ioapic_sid(struct irte *irte, int apic)
+{
+ int i;
+ u16 sid = 0;
+
+ if (!irte)
+ return -1;
+
+ for (i = 0; i < MAX_IO_APICS; i++) {
+ if (ir_ioapic[i].id == apic) {
+ sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
+ break;
+ }
+ }
+
+ if (sid == 0) {
+ pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
+ return -1;
+ }
+
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
+
+ return 0;
+}
+
+int set_msi_sid(struct irte *irte, struct pci_dev *dev)
+{
+ struct pci_dev *bridge;
+
+ if (!irte || !dev)
+ return -1;
+
+ /* PCIe device or Root Complex integrated PCI device */
+ if (dev->is_pcie || !dev->bus->parent) {
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+ (dev->bus->number << 8) | dev->devfn);
+ return 0;
+ }
+
+ bridge = pci_find_upstream_pcie_bridge(dev);
+ if (bridge) {
+ if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
+ set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
+ (bridge->bus->number << 8) | dev->bus->number);
+ else /* this is a legacy PCI bridge */
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+ (bridge->bus->number << 8) | bridge->devfn);
+ }
+
+ return 0;
+}
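To make the source-id cases above concrete, here is a minimal standalone sketch of how the 16-bit source-id is composed in each branch of set_msi_sid() (the bus/devfn values are hypothetical; the SVT_*/SQ_* names refer to the definitions added earlier in this hunk):

#include <stdio.h>
#include <stdint.h>

/* Requester ID layout: bus[15:8] | dev[7:3] | fn[2:0] */
static uint16_t requester_id(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	uint8_t bus = 0x05, devfn = (0x02 << 3) | 0x1;		/* hypothetical 05:02.1 */
	uint8_t bridge_bus = 0x04, bridge_devfn = (0x00 << 3) | 0x0;

	/* PCIe endpoint (or RC-integrated device): verify its own requester ID */
	printf("PCIe device:        SVT_VERIFY_SID_SQ sid=%#06x\n",
	       requester_id(bus, devfn));

	/* behind a PCIe-to-PCI/PCI-X bridge: verify a bus-number range,
	 * encoded as start_bus << 8 | end_bus */
	printf("PCIe-to-PCI bridge: SVT_VERIFY_BUS    sid=%#06x\n",
	       (uint16_t)((bridge_bus << 8) | bus));

	/* behind a legacy PCI bridge: requests carry the bridge's requester ID */
	printf("legacy PCI bridge:  SVT_VERIFY_SID_SQ sid=%#06x\n",
	       requester_id(bridge_bus, bridge_devfn));
	return 0;
}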
+
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
u64 addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
addr = virt_to_phys((void *)iommu->ir_table->base);
@@ -420,9 +519,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
/* Set interrupt-remapping table pointer */
- cmd = iommu->gcmd | DMA_GCMD_SIRTP;
iommu->gcmd |= DMA_GCMD_SIRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
@@ -437,9 +535,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */
- cmd = iommu->gcmd | DMA_GCMD_IRE;
iommu->gcmd |= DMA_GCMD_IRE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
@@ -614,6 +711,35 @@ error:
return -1;
}
+static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu)
+{
+ struct acpi_dmar_pci_path *path;
+ u8 bus;
+ int count;
+
+ bus = scope->bus;
+ path = (struct acpi_dmar_pci_path *)(scope + 1);
+ count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+ / sizeof(struct acpi_dmar_pci_path);
+
+ while (--count > 0) {
+ /*
+ * Access PCI config space directly because the PCI
+ * subsystem isn't initialized yet.
+ */
+ bus = read_pci_config_byte(bus, path->dev, path->fn,
+ PCI_SECONDARY_BUS);
+ path++;
+ }
+
+ ir_ioapic[ir_ioapic_num].bus = bus;
+ ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_ioapic[ir_ioapic_num].iommu = iommu;
+ ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
+ ir_ioapic_num++;
+}
+
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
@@ -638,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
" 0x%Lx\n", scope->enumeration_id,
drhd->address);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
+ ir_parse_one_ioapic_scope(scope, iommu);
}
start += scope->length;
}
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
index ca48f0df8ac9..63a263c18415 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/pci/intr_remapping.h
@@ -3,6 +3,8 @@
struct ioapic_scope {
struct intel_iommu *iommu;
unsigned int id;
+ unsigned int bus; /* PCI bus number */
+ unsigned int devfn; /* PCI devfn number */
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b497daab3d4a..e3a87210e947 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -5,6 +5,7 @@
*
* PCI Express I/O Virtualization (IOV) support.
* Single Root IOV 1.0
+ * Address Translation Service 1.0
*/
#include <linux/pci.h>
@@ -110,7 +111,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
}
if (reset)
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
pci_device_add(virtfn, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
@@ -164,7 +165,7 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
if (reset) {
device_release_driver(&virtfn->dev);
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
}
sprintf(buf, "virtfn%u", id);
@@ -487,13 +488,15 @@ found:
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+ if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
+ iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
if (pdev)
iov->dev = pci_dev_get(pdev);
- else {
+ else
iov->dev = dev;
- mutex_init(&iov->lock);
- }
+
+ mutex_init(&iov->lock);
dev->sriov = iov;
dev->is_physfn = 1;
@@ -513,11 +516,11 @@ static void sriov_release(struct pci_dev *dev)
{
BUG_ON(dev->sriov->nr_virtfn);
- if (dev == dev->sriov->dev)
- mutex_destroy(&dev->sriov->lock);
- else
+ if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
+ mutex_destroy(&dev->sriov->lock);
+
kfree(dev->sriov);
dev->sriov = NULL;
}
@@ -679,3 +682,145 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
+
+static int ats_alloc_one(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 cap;
+ struct pci_ats *ats;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ ats = kzalloc(sizeof(*ats), GFP_KERNEL);
+ if (!ats)
+ return -ENOMEM;
+
+ ats->pos = pos;
+ ats->stu = ps;
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+ dev->ats = ats;
+
+ return 0;
+}
+
+static void ats_free_one(struct pci_dev *dev)
+{
+ kfree(dev->ats);
+ dev->ats = NULL;
+}
+
+/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int rc;
+ u16 ctrl;
+
+ BUG_ON(dev->ats && dev->ats->is_enabled);
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ if (pdev->ats)
+ rc = pdev->ats->stu == ps ? 0 : -EINVAL;
+ else
+ rc = ats_alloc_one(pdev, ps);
+
+ if (!rc)
+ pdev->ats->ref_cnt++;
+ mutex_unlock(&pdev->sriov->lock);
+ if (rc)
+ return rc;
+ }
+
+ if (!dev->is_physfn) {
+ rc = ats_alloc_one(dev, ps);
+ if (rc)
+ return rc;
+ }
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 1;
+
+ return 0;
+}
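A minimal sketch of how a caller (typically an IOMMU driver) might drive this interface; the flow and names here are illustrative only and assume the declarations added to drivers/pci/pci.h later in this patch, not the actual in-tree caller:

/* Illustrative only: enable ATS with the IOMMU's page shift, then size the
 * invalidation queue from the reported queue depth. */
static int example_enable_dev_iotlb(struct pci_dev *pdev, int iommu_page_shift)
{
	int qdep, ret;

	ret = pci_enable_ats(pdev, iommu_page_shift);
	if (ret)
		return ret;	/* no ATS capability, or page shift too small */

	qdep = pci_ats_queue_depth(pdev);
	if (qdep < 0) {
		pci_disable_ats(pdev);
		return qdep;
	}

	/* ... remember qdep so device-IOTLB invalidations can be throttled ... */
	return 0;
}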
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ BUG_ON(!dev->ats || !dev->ats->is_enabled);
+
+ pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 0;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ pdev->ats->ref_cnt--;
+ if (!pdev->ats->ref_cnt)
+ ats_free_one(pdev);
+ mutex_unlock(&pdev->sriov->lock);
+ }
+
+ if (!dev->is_physfn)
+ ats_free_one(dev);
+}
+
+/**
+ * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or negative on failure.
+ *
+ * The ATS spec uses 0 in the Invalidate Queue Depth field to
+ * indicate that the function can accept 32 Invalidate Requests.
+ * But here we use the `real' values (i.e. 1~32) for the Queue
+ * Depth; and 0 indicates the function shares the Queue with
+ * other functions (doesn't exclusively own a Queue).
+ */
+int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ if (dev->ats)
+ return dev->ats->qdep;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+}
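Summarising the queue-depth convention the comment above describes (a hedged sketch of the mapping; PCI_ATS_MAX_QDEP is taken to be 32, as the comment implies):

/* ATS capability QDEP field          ->  value returned by pci_ats_queue_depth()
 *   0 (spec shorthand for "32")      ->  PCI_ATS_MAX_QDEP (assumed 32)
 *   n, with 1 <= n <= 31             ->  n
 *   caller is a virtual function     ->  0 (queue is shared with the PF)
 *   no ATS capability present        ->  -ENODEV
 */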
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 2287116e9822..46dd440e2315 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -1,9 +1,19 @@
/*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2009, Intel Corporation.
*
- * This file is released under the GPLv2.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
*
- * Copyright (C) 2006-2008 Intel Corporation
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
@@ -123,7 +133,15 @@ move_left:
/* Insert the new_iova into domain rbtree by holding writer lock */
/* Add new node and rebalance tree. */
{
- struct rb_node **entry = &((prev)), *parent = NULL;
+ struct rb_node **entry, *parent = NULL;
+
+ /* If we have 'prev', it's a valid place to start the
+ insertion. Otherwise, start from the root. */
+ if (prev)
+ entry = &prev;
+ else
+ entry = &iovad->rbroot.rb_node;
+
/* Figure out where to put new node */
while (*entry) {
struct iova *this = container_of(*entry,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 362773247fbf..d986afb7032b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -75,22 +75,17 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
}
#endif
-static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- }
-}
+ BUG_ON(!pos);
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
- __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -131,21 +126,24 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
* mask all MSI interrupts by clearing the MSI enable bit does not work
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
- *
- * Returns 1 if it succeeded in masking the interrupt and 0 if the device
- * doesn't support MSI masking.
*/
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
u32 mask_bits = desc->masked;
if (!desc->msi_attrib.maskbit)
- return;
+ return 0;
mask_bits &= ~mask;
mask_bits |= flag;
pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
- desc->masked = mask_bits;
+
+ return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+ desc->masked = __msi_mask_irq(desc, mask, flag);
}
/*
@@ -155,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
* file. This saves a few milliseconds when initialising devices with lots
* of MSI-X interrupts.
*/
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
mask_bits &= ~1;
mask_bits |= flag;
writel(mask_bits, desc->mask_base + offset);
- desc->masked = mask_bits;
+
+ return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ desc->masked = __msix_mask_irq(desc, flag);
}
static void msi_set_mask_bit(unsigned irq, u32 flag)
@@ -196,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
void __iomem *base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
- msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
@@ -233,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
- writel(msg->address_lo,
- base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(msg->address_hi,
- base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
@@ -303,7 +305,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 0);
+ msi_set_enable(dev, pos, 0);
write_msi_msg(dev->irq, &entry->msg);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -321,22 +323,22 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
+ BUG_ON(list_empty(&dev->msi_list));
+ entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ pos = entry->msi_attrib.pos;
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 0);
+ control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
write_msi_msg(entry->irq, &entry->msg);
msix_mask_irq(entry, entry->masked);
}
- BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
control &= ~PCI_MSIX_FLAGS_MASKALL;
- control |= PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
@@ -365,9 +367,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
u16 control;
unsigned mask;
- msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+
pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
entry = alloc_msi_entry(dev);
@@ -381,7 +383,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.pos = pos;
- entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
+ entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -393,13 +395,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
/* Configure MSI capability structure */
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
msi_free_irqs(dev);
return ret;
}
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 1);
+ msi_set_enable(dev, pos, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
@@ -427,11 +430,14 @@ static int msix_capability_init(struct pci_dev *dev,
u8 bir;
void __iomem *base;
- msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
/* Request & Map MSI-X table region */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
nr_entries = multi_msix_capable(control);
pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -442,11 +448,16 @@ static int msix_capability_init(struct pci_dev *dev,
if (base == NULL)
return -ENOMEM;
- /* MSI-X Table Initialization */
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
- if (!entry)
- break;
+ if (!entry) {
+ if (!i)
+ iounmap(base);
+ else
+ msi_free_irqs(dev);
+ /* Not enough memory. Don't try again */
+ return -ENOMEM;
+ }
j = entries[i].entry;
entry->msi_attrib.is_msix = 1;
@@ -455,7 +466,6 @@ static int msix_capability_init(struct pci_dev *dev,
entry->msi_attrib.default_irq = dev->irq;
entry->msi_attrib.pos = pos;
entry->mask_base = base;
- msix_mask_irq(entry, 1);
list_add_tail(&entry->list, &dev->msi_list);
}
@@ -480,22 +490,31 @@ static int msix_capability_init(struct pci_dev *dev,
return ret;
}
+ /*
+ * Some devices require MSI-X to be enabled before we can touch the
+ * MSI-X registers. We need to mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
i = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
entries[i].vector = entry->irq;
set_irq_msi(entry->irq, entry);
+ j = entries[i].entry;
+ entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL);
+ msix_mask_irq(entry, 1);
i++;
}
- /* Set MSI-X enabled bits */
+
+ /* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 1);
dev->msix_enabled = 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
- int vector = entry->msi_attrib.entry_nr;
- entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
- }
+ control &= ~PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
return 0;
}
@@ -596,19 +615,24 @@ void pci_msi_shutdown(struct pci_dev *dev)
struct msi_desc *desc;
u32 mask;
u16 ctrl;
+ unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- msi_set_enable(dev, 0);
+ BUG_ON(list_empty(&dev->msi_list));
+ desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ pos = desc->msi_attrib.pos;
+
+ msi_set_enable(dev, pos, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
- BUG_ON(list_empty(&dev->msi_list));
- desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
+ /* Return the device with MSI unmasked, as it was initially */
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
- msi_mask_irq(desc, mask, ~mask);
+ /* Keep cached state to be restored */
+ __msi_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
@@ -648,10 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
if (entry->msi_attrib.is_msix) {
- writel(1, entry->mask_base + entry->msi_attrib.entry_nr
- * PCI_MSIX_ENTRY_SIZE
- + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
@@ -691,8 +711,8 @@ int pci_msix_table_size(struct pci_dev *dev)
* indicates the successful configuration of MSI-X capability structure
* with new allocated MSI-X irqs. A return of < 0 indicates a failure.
* Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs available. Driver should use the returned value to re-send
- * its request.
+ * of irqs or MSI-X vectors available. Driver should use the returned value to
+ * re-send its request.
**/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
@@ -708,7 +728,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
nr_entries = pci_msix_table_size(dev);
if (nvec > nr_entries)
- return -EINVAL;
+ return nr_entries;
/* Check for any invalid entries */
for (i = 0; i < nvec; i++) {
@@ -739,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
void pci_msix_shutdown(struct pci_dev* dev)
{
+ struct msi_desc *entry;
+
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
+ /* Return the device with MSI-X masked, as it was initially */
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ /* Keep cached states to be restored */
+ __msix_mask_irq(entry, 1);
+ }
+
msix_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 71f4df2ef654..de27c1cb5a2b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,31 +6,25 @@
#ifndef MSI_H
#define MSI_H
-#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4
-#define PCI_MSIX_ENTRY_DATA_OFFSET 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET 12
+#define PCI_MSIX_ENTRY_SIZE 16
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0
+#define PCI_MSIX_ENTRY_UPPER_ADDR 4
+#define PCI_MSIX_ENTRY_DATA 8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
#define msi_data_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 )
-#define msi_mask_bits_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
-#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
+ (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
+#define msi_mask_reg(base, is64bit) \
+ (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
#define msix_table_offset_reg(base) (base + 0x04)
#define msix_pba_offset_reg(base) (base + 0x08)
-#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
-#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable msix_table_size
-#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
-#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
-#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
+#define multi_msix_capable(control) msix_table_size((control))
#endif /* MSI_H */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07bbb9b3b93f..dbd0f947f497 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -485,6 +485,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
+ case PCI_D3hot:
+ case PCI_D3cold:
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
@@ -1208,7 +1210,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int error = 0;
bool pme_done = false;
@@ -1287,15 +1289,14 @@ pci_power_t pci_target_state(struct pci_dev *dev)
default:
target_state = state;
}
+ } else if (!dev->pm_cap) {
+ target_state = PCI_D0;
} else if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
- if (!dev->pm_cap)
- return PCI_POWER_ERROR;
-
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
@@ -1516,11 +1517,20 @@ void pci_enable_ari(struct pci_dev *dev)
*
* Perform INTx swizzling for a device behind one level of bridge. This is
* required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards.
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
*/
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
- return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
}
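A quick worked example of the swizzle formula above (a standalone sketch; the slot and pin values are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Mirrors pci_swizzle_interrupt_pin(); pins are 1..4 for INTA..INTD. */
static uint8_t swizzle(uint8_t pin, int slot)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* INTB (2) from a device in slot 3: ((2 - 1) + 3) % 4 + 1 = 1 = INTA */
	assert(swizzle(2, 3) == 1);

	/* With ARI enabled the slot is treated as 0, so the pin is unchanged */
	assert(swizzle(4, 0) == 4);
	return 0;
}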
int
@@ -1532,7 +1542,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
if (!pin)
return -1;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -1552,7 +1562,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -2058,111 +2068,177 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
-static int __pcie_flr(struct pci_dev *dev, int probe)
+static int pcie_flr(struct pci_dev *dev, int probe)
{
- u16 status;
+ int i;
+ int pos;
u32 cap;
- int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ u16 status;
- if (!exppos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
return -ENOTTY;
- pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+
+ pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
- msleep(100);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
-
- dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
- "sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
-
-transaction_done:
- pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ if (!(status & PCI_EXP_DEVSTA_TRPND))
+ goto clear;
+ }
+
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ msleep(100);
- pci_unblock_user_cfg_access(dev);
return 0;
}
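The wait loop above polls the Transaction Pending bit up to four times with an exponential backoff before forcing the reset; a small sketch of the resulting schedule (illustrative only, delays as implied by msleep((1 << (i - 1)) * 100)):

#include <stdio.h>

int main(void)
{
	int i, total = 0;

	for (i = 0; i < 4; i++) {
		int delay = i ? (1 << (i - 1)) * 100 : 0;	/* 0, 100, 200, 400 ms */

		total += delay;
		printf("poll %d after %3d ms (cumulative %3d ms)\n", i, delay, total);
	}
	/* worst case: roughly 700 ms of waiting before the FLR is issued anyway */
	return 0;
}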
-static int __pci_af_flr(struct pci_dev *dev, int probe)
+static int pci_af_flr(struct pci_dev *dev, int probe)
{
- int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
- u8 status;
+ int i;
+ int pos;
u8 cap;
+ u8 status;
- if (!cappos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_AF);
+ if (!pos)
return -ENOTTY;
- pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
+
+ pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
+ if (!(status & PCI_AF_STATUS_TP))
+ goto clear;
+ }
+
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+ msleep(100);
+
+ return 0;
+}
+
+static int pci_pm_reset(struct pci_dev *dev, int probe)
+{
+ u16 csr;
+
+ if (!dev->pm_cap)
+ return -ENOTTY;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
+ if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ return 0;
+}
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
+ u16 ctrl;
+ struct pci_dev *pdev;
+
+ if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
msleep(100);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
-
- dev_info(&dev->dev, "Busy after 100ms while trying to"
- " reset; sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (status & PCI_AF_STATUS_TP)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
-
-transaction_done:
- pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
- mdelay(100);
-
- pci_unblock_user_cfg_access(dev);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
return 0;
}
-static int __pci_reset_function(struct pci_dev *pdev, int probe)
+static int pci_dev_reset(struct pci_dev *dev, int probe)
{
- int res;
+ int rc;
+
+ might_sleep();
+
+ if (!probe) {
+ pci_block_user_cfg_access(dev);
+ /* block PM suspend, driver probe, etc. */
+ down(&dev->dev.sem);
+ }
+
+ rc = pcie_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
- res = __pcie_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ rc = pci_af_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
- res = __pci_af_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ rc = pci_pm_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
- return res;
+ rc = pci_parent_bus_reset(dev, probe);
+done:
+ if (!probe) {
+ up(&dev->dev.sem);
+ pci_unblock_user_cfg_access(dev);
+ }
+
+ return rc;
}
/**
- * pci_execute_reset_function() - Reset a PCI device function
- * @dev: Device function to reset
+ * __pci_reset_function - reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2174,18 +2250,18 @@ static int __pci_reset_function(struct pci_dev *pdev, int probe)
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
-int pci_execute_reset_function(struct pci_dev *dev)
+int __pci_reset_function(struct pci_dev *dev)
{
- return __pci_reset_function(dev, 0);
+ return pci_dev_reset(dev, 0);
}
-EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
- * pci_reset_function() - quiesce and reset a PCI device function
- * @dev: Device function to reset
+ * pci_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2193,32 +2269,33 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function);
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from pci_execute_reset_function in that it saves and restores device state
+ * from __pci_reset_function in that it saves and restores device state
* over the reset.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
int pci_reset_function(struct pci_dev *dev)
{
- int r = __pci_reset_function(dev, 1);
+ int rc;
- if (r < 0)
- return r;
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- disable_irq(dev->irq);
pci_save_state(dev);
+ /*
+ * Both INTx and MSI are disabled after the Interrupt Disable bit
+ * is set and the Bus Master bit is cleared.
+ */
pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- r = pci_execute_reset_function(dev);
+ rc = pci_dev_reset(dev, 0);
pci_restore_state(dev);
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- enable_irq(dev->irq);
- return r;
+ return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
@@ -2591,6 +2668,8 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "resource_alignment=", 19)) {
pci_set_resource_alignment_param(str + 19,
strlen(str + 19));
+ } else if (!strncmp(str, "ecrc=", 5)) {
+ pcie_ecrc_get_policy(str + 5);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d03f6b99f292..f73bcbedf37c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -229,6 +229,15 @@ struct pci_sriov {
u8 __iomem *mstate; /* VF Migration State Array */
};
+/* Address Translation Service */
+struct pci_ats {
+ int pos; /* capability position */
+ int stu; /* Smallest Translation Unit */
+ int qdep; /* Invalidate Queue Depth */
+ int ref_cnt; /* Physical Function reference count */
+ int is_enabled:1; /* Enable bit is set */
+};
+
#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
@@ -236,6 +245,20 @@ extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
+
+extern int pci_enable_ats(struct pci_dev *dev, int ps);
+extern void pci_disable_ats(struct pci_dev *dev);
+extern int pci_ats_queue_depth(struct pci_dev *dev);
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return dev->ats && dev->ats->is_enabled;
+}
#else
static inline int pci_iov_init(struct pci_dev *dev)
{
@@ -257,6 +280,22 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}
+
+static inline int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ return -ENODEV;
+}
+static inline void pci_disable_ats(struct pci_dev *dev)
+{
+}
+static inline int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde588aa13..50e94e02378a 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@ config PCIEAER
This enables PCI Express Root Port Advanced Error Reporting
(AER) driver support. Error reporting messages sent to Root
Port will be handled by PCI Express AER driver.
+
+
+#
+# PCI Express ECRC
+#
+config PCIE_ECRC
+ bool "PCI Express ECRC settings control"
+ depends on PCIEAER
+ help
+	  Used to override firmware/BIOS settings for PCI Express ECRC
+	  (transaction-layer end-to-end CRC checking).
+
+ When in doubt, say N.
+
+source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 000000000000..b8c925c1f6aa
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
+#
+# PCI Express Root Port Device AER Debug Configuration
+#
+
+config PCIEAER_INJECT
+ tristate "PCIE AER error injector support"
+ depends on PCIEAER
+ default n
+ help
+ This enables PCI Express Root Port Advanced Error Reporting
+ (AER) software error injector.
+
+	  Debugging PCIe AER code is quite difficult because it is hard
+	  to trigger various real hardware errors.  Software-based
+	  error injection can fake almost all kinds of errors with the
+	  help of a user-space helper tool, aer-inject, which can be
+	  obtained from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8455a8..2cba67510dc8 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
obj-$(CONFIG_PCIEAER) += aerdriver.o
+obj-$(CONFIG_PCIE_ECRC) += ecrc.o
+
aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
+obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 000000000000..d92ae21a59d8
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
+/*
+ * PCIE AER software error injection support.
+ *
+ * Debugging PCIe AER code is quite difficult because it is hard to
+ * trigger various real hardware errors. Software-based error
+ * injection can fake almost all kinds of errors with the help of a
+ * user-space helper tool, aer-inject, which can be obtained from:
+ * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ *
+ * Copyright 2009 Intel Corporation.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include "aerdrv.h"
+
+struct aer_error_inj
+{
+ u8 bus;
+ u8 dev;
+ u8 fn;
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+};
+
+struct aer_error
+{
+ struct list_head list;
+ unsigned int bus;
+ unsigned int devfn;
+ int pos_cap_err;
+
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+ u32 root_status;
+ u32 source_id;
+};
+
+struct pci_bus_ops
+{
+ struct list_head list;
+ struct pci_bus *bus;
+ struct pci_ops *ops;
+};
+
+static LIST_HEAD(einjected);
+
+static LIST_HEAD(pci_bus_ops_list);
+
+/* Protect einjected and pci_bus_ops_list */
+static DEFINE_SPINLOCK(inject_lock);
+
+static void aer_error_init(struct aer_error *err, unsigned int bus,
+ unsigned int devfn, int pos_cap_err)
+{
+ INIT_LIST_HEAD(&err->list);
+ err->bus = bus;
+ err->devfn = devfn;
+ err->pos_cap_err = pos_cap_err;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
+{
+ struct aer_error *err;
+
+ list_for_each_entry(err, &einjected, list) {
+ if (bus == err->bus && devfn == err->devfn)
+ return err;
+ }
+ return NULL;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
+{
+ return __find_aer_error(dev->bus->number, dev->devfn);
+}
+
+/* inject_lock must be held before calling */
+static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
+{
+ struct pci_bus_ops *bus_ops;
+
+ list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
+ if (bus_ops->bus == bus)
+ return bus_ops->ops;
+ }
+ return NULL;
+}
+
+static struct pci_bus_ops *pci_bus_ops_pop(void)
+{
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops = NULL;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (list_empty(&pci_bus_ops_list))
+ bus_ops = NULL;
+ else {
+ struct list_head *lh = pci_bus_ops_list.next;
+ list_del(lh);
+ bus_ops = list_entry(lh, struct pci_bus_ops, list);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return bus_ops;
+}
+
+static u32 *find_pci_config_dword(struct aer_error *err, int where,
+ int *prw1cs)
+{
+ int rw1cs = 0;
+ u32 *target = NULL;
+
+ if (err->pos_cap_err == -1)
+ return NULL;
+
+ switch (where - err->pos_cap_err) {
+ case PCI_ERR_UNCOR_STATUS:
+ target = &err->uncor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_COR_STATUS:
+ target = &err->cor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_HEADER_LOG:
+ target = &err->header_log0;
+ break;
+ case PCI_ERR_HEADER_LOG+4:
+ target = &err->header_log1;
+ break;
+ case PCI_ERR_HEADER_LOG+8:
+ target = &err->header_log2;
+ break;
+ case PCI_ERR_HEADER_LOG+12:
+ target = &err->header_log3;
+ break;
+ case PCI_ERR_ROOT_STATUS:
+ target = &err->root_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_ROOT_COR_SRC:
+ target = &err->source_id;
+ break;
+ }
+ if (prw1cs)
+ *prw1cs = rw1cs;
+ return target;
+}
+
+static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, NULL);
+ if (sim) {
+ *val = *sim;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->read(bus, devfn, where, size, val);
+}
+
+int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ int rw1cs;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, &rw1cs);
+ if (sim) {
+ if (rw1cs)
+ *sim ^= val;
+ else
+ *sim = val;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops pci_ops_aer = {
+ .read = pci_read_aer,
+ .write = pci_write_aer,
+};
+
+static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
+ struct pci_bus *bus,
+ struct pci_ops *ops)
+{
+ INIT_LIST_HEAD(&bus_ops->list);
+ bus_ops->bus = bus;
+ bus_ops->ops = ops;
+}
+
+static int pci_bus_set_aer_ops(struct pci_bus *bus)
+{
+ struct pci_ops *ops;
+ struct pci_bus_ops *bus_ops;
+ unsigned long flags;
+
+ bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+ ops = pci_bus_set_ops(bus, &pci_ops_aer);
+ spin_lock_irqsave(&inject_lock, flags);
+ if (ops == &pci_ops_aer)
+ goto out;
+ pci_bus_ops_init(bus_ops, bus, ops);
+ list_add(&bus_ops->list, &pci_bus_ops_list);
+ bus_ops = NULL;
+out:
+ spin_unlock_irqrestore(&inject_lock, flags);
+ if (bus_ops)
+ kfree(bus_ops);
+ return 0;
+}
+
+static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!dev->is_pcie)
+ break;
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
+static int find_aer_device_iter(struct device *device, void *data)
+{
+ struct pcie_device **result = data;
+ struct pcie_device *pcie_dev;
+
+ if (device->bus == &pcie_port_bus_type) {
+ pcie_dev = to_pcie_device(device);
+ if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
+ *result = pcie_dev;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
+{
+ return device_for_each_child(&dev->dev, result, find_aer_device_iter);
+}
+
+static int aer_inject(struct aer_error_inj *einj)
+{
+ struct aer_error *err, *rperr;
+ struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
+ struct pci_dev *dev, *rpdev;
+ struct pcie_device *edev;
+ unsigned long flags;
+ unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
+ int pos_cap_err, rp_pos_cap_err;
+ u32 sever;
+ int ret = 0;
+
+ dev = pci_get_bus_and_slot(einj->bus, devfn);
+ if (!dev)
+ return -EINVAL;
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+
+ rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
+ if (!rp_pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+
+ err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!err_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!rperr_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+
+ err = __find_aer_error_by_dev(dev);
+ if (!err) {
+ err = err_alloc;
+ err_alloc = NULL;
+ aer_error_init(err, einj->bus, devfn, pos_cap_err);
+ list_add(&err->list, &einjected);
+ }
+ err->uncor_status |= einj->uncor_status;
+ err->cor_status |= einj->cor_status;
+ err->header_log0 = einj->header_log0;
+ err->header_log1 = einj->header_log1;
+ err->header_log2 = einj->header_log2;
+ err->header_log3 = einj->header_log3;
+
+ rperr = __find_aer_error_by_dev(rpdev);
+ if (!rperr) {
+ rperr = rperr_alloc;
+ rperr_alloc = NULL;
+ aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
+ rp_pos_cap_err);
+ list_add(&rperr->list, &einjected);
+ }
+ if (einj->cor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+ else
+ rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
+ rperr->source_id &= 0xffff0000;
+ rperr->source_id |= (einj->bus << 8) | devfn;
+ }
+ if (einj->uncor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ if (sever & einj->uncor_status) {
+ rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
+ if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
+ rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+ } else
+ rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+ rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+ rperr->source_id &= 0x0000ffff;
+ rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+
+ ret = pci_bus_set_aer_ops(dev->bus);
+ if (ret)
+ goto out_put;
+ ret = pci_bus_set_aer_ops(rpdev->bus);
+ if (ret)
+ goto out_put;
+
+ if (find_aer_device(rpdev, &edev))
+ aer_irq(-1, edev);
+ else
+ ret = -EINVAL;
+out_put:
+ if (err_alloc)
+ kfree(err_alloc);
+ if (rperr_alloc)
+ kfree(rperr_alloc);
+ pci_dev_put(dev);
+ return ret;
+}
+
+static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ struct aer_error_inj einj;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (usize != sizeof(struct aer_error_inj))
+ return -EINVAL;
+
+ if (copy_from_user(&einj, ubuf, usize))
+ return -EFAULT;
+
+ ret = aer_inject(&einj);
+ return ret ? ret : usize;
+}
+
+static const struct file_operations aer_inject_fops = {
+ .write = aer_inject_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice aer_inject_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "aer_inject",
+ .fops = &aer_inject_fops,
+};
+
+static int __init aer_inject_init(void)
+{
+ return misc_register(&aer_inject_device);
+}
+
+static void __exit aer_inject_exit(void)
+{
+ struct aer_error *err, *err_next;
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops;
+
+ misc_deregister(&aer_inject_device);
+
+ while ((bus_ops = pci_bus_ops_pop())) {
+ pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
+ kfree(bus_ops);
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+ list_for_each_entry_safe(err, err_next,
+ &pci_bus_ops_list, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+}
+
+module_init(aer_inject_init);
+module_exit(aer_inject_exit);
+
+MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_LICENSE("GPL");
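To make the character-device interface registered above concrete, here is a hedged user-space sketch that pushes one struct aer_error_inj record into the injector; the /dev node name, the target BDF 00:01.0, and the correctable-status bit are illustrative assumptions only — the aer-inject tool referenced in the header comment is the supported front end.

/* Hypothetical minimal injector (user space, not part of this patch). */
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

struct aer_error_inj {		/* layout mirrors the kernel structure above */
	uint8_t  bus, dev, fn;
	uint32_t uncor_status, cor_status;
	uint32_t header_log0, header_log1, header_log2, header_log3;
};

int main(void)
{
	struct aer_error_inj einj = {
		.bus = 0x00, .dev = 0x01, .fn = 0x0,	/* assumed target 00:01.0 */
		.cor_status = 0x00000040,		/* assumed correctable status bit */
	};
	int fd = open("/dev/aer_inject", O_WRONLY);	/* assumed misc node name */

	if (fd < 0)
		return 1;
	if (write(fd, &einj, sizeof(einj)) != sizeof(einj))
		return 1;
	return close(fd) ? 1 : 0;
}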
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 32ade5af927e..4770f13b3ca1 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -77,7 +77,7 @@ void pci_no_aer(void)
*
* Invoked when Root Port detects AER messages.
**/
-static irqreturn_t aer_irq(int irq, void *context)
+irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
@@ -126,6 +126,7 @@ static irqreturn_t aer_irq(int irq, void *context)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(aer_irq);
/**
* aer_alloc_rpc - allocate Root Port data structure
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index aa14482a4779..bbd7428ca2d0 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
+#include <linux/interrupt.h>
#define AER_NONFATAL 0
#define AER_FATAL 1
@@ -56,7 +57,11 @@ struct header_log_regs {
unsigned int dw3;
};
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+ u16 id;
int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
int flags;
unsigned int status; /* COR/UNCOR Error Status */
@@ -120,6 +125,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 307452f30035..3d8872704a58 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
#include "aerdrv.h"
static int forceload;
+static int nosourceid;
module_param(forceload, bool, 0);
+module_param(nosourceid, bool, 0);
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -109,19 +111,23 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
#endif /* 0 */
-static void set_device_error_reporting(struct pci_dev *dev, void *data)
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
- if (dev->pcie_type != PCIE_RC_PORT &&
- dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
- dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
- return;
+ if (dev->pcie_type == PCIE_RC_PORT ||
+ dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
+ dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
}
/**
@@ -139,73 +145,148 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}
-static int find_device_iter(struct device *device, void *data)
+static inline int compare_device_id(struct pci_dev *dev,
+ struct aer_err_info *e_info)
{
- struct pci_dev *dev;
- u16 id = *(unsigned long *)data;
- u8 secondary, subordinate, d_bus = id >> 8;
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
+ /*
+ * Device ID match
+ */
+ return 1;
+ }
- if (device->bus == &pci_bus_type) {
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
+ return 0;
+}
+
+static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
+{
+ if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
+ e_info->dev[e_info->error_dev_num] = dev;
+ e_info->error_dev_num++;
+ return 1;
+ } else
+ return 0;
+}
+
+
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ int pos;
+ u32 status;
+ u32 mask;
+ u16 reg16;
+ int result;
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ /*
+	 * When the bus ID is 0, it might be a bogus ID
+	 * reported by the root port.
+ */
+ if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ result = compare_device_id(dev, e_info);
+ if (result)
+ add_error_device(e_info, dev);
/*
- * If device is P2P, check if it is an upstream?
+		 * If there is no multiple-error condition, we stop
+		 * or continue based on the result of the ID comparison.
*/
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(dev, PCI_SECONDARY_BUS,
- &secondary);
- pci_read_config_byte(dev, PCI_SUBORDINATE_BUS,
- &subordinate);
- if (d_bus >= secondary && d_bus <= subordinate) {
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
+ if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
+ return result;
+
+ /*
+		 * If there are multiple errors and the ID does match,
+		 * we need to continue searching other devices under
+		 * the root port. Returning 0 means exactly that.
+ */
+ if (result)
+ return 0;
+ }
+
+ /*
+	 * When any of the following is true:
+	 * 1) nosourceid==y;
+	 * 2) the bus ID is 0 (some ports might lose the bus ID of the
+	 *    error source);
+	 * 3) there are multiple errors and the prior ID comparison fails;
+	 * we check the AER status registers to find the initial reporter.
+ */
+ if (atomic_read(&dev->enable_cnt) == 0)
+ return 0;
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ /* Check if AER is enabled */
+ pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ if (!(reg16 & (
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE)))
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return 0;
+
+ status = 0;
+ mask = 0;
+ if (e_info->severity == AER_CORRECTABLE) {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_MASK,
+ &mask);
+ if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
+ }
+ } else {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_MASK,
+ &mask);
+ if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
}
}
return 0;
+
+added:
+ if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
+ return 0;
+ else
+ return 1;
}
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @id: device ID of agent who sends an error message to this Root Port
+ * @e_info: detailed error information, including the error source ID
*
* Invoked when error is detected at the Root Port.
*/
-static struct device* find_source_device(struct pci_dev *parent, u16 id)
+static void find_source_device(struct pci_dev *parent,
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
- struct device *device;
- unsigned long device_addr;
- int status;
+ int result;
/* Is Root Port an agent that sends error message? */
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return &dev->dev;
-
- do {
- device_addr = id;
- if ((status = device_for_each_child(&dev->dev,
- &device_addr, find_device_iter))) {
- device = (struct device*)device_addr;
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return device;
- }
- }while (status);
+ result = find_device_iter(dev, e_info);
+ if (result)
+ return;
- return NULL;
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
}
-static void report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@ static void report_error_detected(struct pci_dev *dev, void *data)
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return;
+ return 0;
}
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_mmio_enabled(struct pci_dev *dev, void *data)
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@ static void report_mmio_enabled(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_slot_reset(struct pci_dev *dev, void *data)
+static int report_slot_reset(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@ static void report_slot_reset(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_resume(struct pci_dev *dev, void *data)
+static int report_resume(struct pci_dev *dev, void *data)
{
struct pci_error_handlers *err_handler;
@@ -284,11 +365,11 @@ static void report_resume(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- return;
+ return 0;
}
/**
@@ -305,7 +386,7 @@ static void report_resume(struct pci_dev *dev, void *data)
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
enum pci_channel_state state,
char *error_mesg,
- void (*cb)(struct pci_dev *, void *))
+ int (*cb)(struct pci_dev *, void *))
{
struct aer_broadcast_data result_data;
@@ -497,12 +578,12 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
*/
static void handle_error_source(struct pcie_device * aerdev,
struct pci_dev *dev,
- struct aer_err_info info)
+ struct aer_err_info *info)
{
pci_ers_result_t status = 0;
int pos;
- if (info.severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE) {
/*
		 * Correctable error does not need software intervention.
* No need to go through error recovery process.
@@ -510,9 +591,9 @@ static void handle_error_source(struct pcie_device * aerdev,
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
- info.status);
+ info->status);
} else {
- status = do_recovery(aerdev, dev, info.severity);
+ status = do_recovery(aerdev, dev, info->severity);
if (status == PCI_ERS_RESULT_RECOVERED) {
dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
"successfully recovered\n");
@@ -661,6 +742,28 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return AER_SUCCESS;
}
+static inline void aer_process_err_devices(struct pcie_device *p_device,
+ struct aer_err_info *e_info)
+{
+ int i;
+
+ if (!e_info->dev[0]) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ }
+
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info) ==
+ AER_SUCCESS) {
+ aer_print_error(e_info->dev[i], e_info);
+ handle_error_source(p_device,
+ e_info->dev[i],
+ e_info);
+ }
+ }
+}
+
/**
* aer_isr_one_error - consume an error detected by root port
* @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
- struct device *s_device;
- struct aer_err_info e_info = {0, 0, 0,};
+ struct aer_err_info *e_info;
int i;
- u16 id;
+
+ /* struct aer_err_info might be big, so we allocate it with slab */
+ e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
+ if (e_info == NULL) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "Can't allocate mem when processing AER errors\n");
+ return;
+ }
/*
* There is a possibility that both correctable error and
@@ -684,31 +793,26 @@ static void aer_isr_one_error(struct pcie_device *p_device,
if (!(e_src->status & i))
continue;
+ memset(e_info, 0, sizeof(struct aer_err_info));
+
/* Init comprehensive error information */
if (i & PCI_ERR_ROOT_COR_RCV) {
- id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
} else {
- id = ERR_UNCOR_ID(e_src->id);
- e_info.severity = ((e_src->status >> 6) & 1);
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info->severity = ((e_src->status >> 6) & 1);
}
if (e_src->status &
(PCI_ERR_ROOT_MULTI_COR_RCV |
PCI_ERR_ROOT_MULTI_UNCOR_RCV))
- e_info.flags |= AER_MULTI_ERROR_VALID_FLAG;
- if (!(s_device = find_source_device(p_device->port, id))) {
- printk(KERN_DEBUG "%s->can't find device of ID%04x\n",
- __func__, id);
- continue;
- }
- if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
- AER_SUCCESS) {
- aer_print_error(to_pci_dev(s_device), &e_info);
- handle_error_source(p_device,
- to_pci_dev(s_device),
- e_info);
- }
+ e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
+
+ find_source_device(p_device->port, e_info);
+ aer_process_err_devices(p_device, e_info);
}
+
+ kfree(e_info);
}
/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 000000000000..a928d8ab6bda
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
+/*
+ * Enables/disables PCIe ECRC checking.
+ *
+ * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ * Andrew Patterson <andrew.patterson@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/errno.h>
+#include "../../pci.h"
+
+#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
+#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
+#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
+
+static int ecrc_policy = ECRC_POLICY_DEFAULT;
+
+static const char *ecrc_policy_str[] = {
+ [ECRC_POLICY_DEFAULT] = "bios",
+ [ECRC_POLICY_OFF] = "off",
+ [ECRC_POLICY_ON] = "on"
+};
+
+/**
+ * enable_ecrc_checking - enable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int enable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ if (reg32 & PCI_ERR_CAP_ECRC_GENC)
+ reg32 |= PCI_ERR_CAP_ECRC_GENE;
+ if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
+ reg32 |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int disable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * @dev: the PCI device
+ */
+void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ switch (ecrc_policy) {
+ case ECRC_POLICY_DEFAULT:
+ return;
+ case ECRC_POLICY_OFF:
+ disable_ecrc_checking(dev);
+ break;
+ case ECRC_POLICY_ON:
+ enable_ecrc_checking(dev);
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ * @str: ECRC policy string ("bios", "off", or "on")
+ */
+void pcie_ecrc_get_policy(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
+ if (!strncmp(str, ecrc_policy_str[i],
+ strlen(ecrc_policy_str[i])))
+ break;
+ if (i >= ARRAY_SIZE(ecrc_policy_str))
+ return;
+
+ ecrc_policy = i;
+}
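A brief hedged sketch of how the two entry points defined above fit together: pcie_ecrc_get_policy() records the policy chosen via the ecrc= option handled in pci_setup() earlier in this patch, and pcie_set_ecrc_checking() applies it per device when error reporting is configured. The wrapper below is hypothetical glue for illustration only.

/* Hypothetical glue: force the "on" policy and apply it to one device. */
static void example_apply_ecrc(struct pci_dev *dev)
{
	pcie_ecrc_get_policy("on");	/* normally fed from the pci=ecrc= option */
	pcie_set_ecrc_checking(dev);	/* sets GENE/CHKE only where GENC/CHKC exist */
}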
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f168af4..3d27c97e0486 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-struct endpoint_state {
- unsigned int l0s_acceptable_latency;
- unsigned int l1_acceptable_latency;
+struct aspm_latency {
+ u32 l0s; /* L0s latency (nsec) */
+ u32 l1; /* L1 latency (nsec) */
};
struct pcie_link_state {
- struct list_head sibiling;
- struct pci_dev *pdev;
- bool downstream_has_switch;
-
- struct pcie_link_state *parent;
- struct list_head children;
- struct list_head link;
+ struct pci_dev *pdev; /* Upstream component of the Link */
+ struct pcie_link_state *root; /* pointer to the root port link */
+ struct pcie_link_state *parent; /* pointer to the parent Link state */
+ struct list_head sibling; /* node in link_list */
+ struct list_head children; /* list of child link states */
+ struct list_head link; /* node in parent's children list */
/* ASPM state */
- unsigned int support_state;
- unsigned int enabled_state;
- unsigned int bios_aspm_state;
- /* upstream component */
- unsigned int l0s_upper_latency;
- unsigned int l1_upper_latency;
- /* downstream component */
- unsigned int l0s_down_latency;
- unsigned int l1_down_latency;
- /* Clock PM state*/
- unsigned int clk_pm_capable;
- unsigned int clk_pm_enabled;
- unsigned int bios_clk_state;
+ u32 aspm_support:2; /* Supported ASPM state */
+ u32 aspm_enabled:2; /* Enabled ASPM state */
+ u32 aspm_default:2; /* Default ASPM state by BIOS */
+
+ /* Clock PM state */
+ u32 clkpm_capable:1; /* Clock PM capable? */
+ u32 clkpm_enabled:1; /* Current Clock PM state */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ /* Latencies */
+ struct aspm_latency latency; /* Exit latency */
/*
- * A pcie downstream port only has one slot under it, so at most there
- * are 8 functions
+	 * Endpoint acceptable latencies. A PCIe downstream port only
+	 * has one slot under it, so there are at most 8 functions.
*/
- struct endpoint_state endpoints[8];
+ struct aspm_latency acceptable[8];
};
static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@ static const char *policy_str[] = {
#define LINK_RETRAIN_TIMEOUT HZ
-static int policy_to_aspm_state(struct pci_dev *pdev)
+static int policy_to_aspm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_DEFAULT:
- return link_state->bios_aspm_state;
+ return link->aspm_default;
}
return 0;
}
-static int policy_to_clkpm_state(struct pci_dev *pdev)
+static int policy_to_clkpm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@ static int policy_to_clkpm_state(struct pci_dev *pdev)
/* Disable Clock PM */
return 1;
case POLICY_DEFAULT:
- return link_state->bios_clk_state;
+ return link->clkpm_default;
}
return 0;
}
-static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
+static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
- struct pci_dev *child_dev;
int pos;
u16 reg16;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (enable)
reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
else
reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
}
- link_state->clk_pm_enabled = !!enable;
+ link->clkpm_enabled = !!enable;
}
-static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
+static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- int pos;
+ /* Don't enable Clock PM if the link is not Clock PM capable */
+ if (!link->clkpm_capable && enable)
+ return;
+	/* Nothing to do if the requested state equals the current state */
+ if (link->clkpm_enabled == enable)
+ return;
+ pcie_set_clkpm_nocheck(link, enable);
+}
+
+static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+{
+ int pos, capable = 1, enabled = 1;
u32 reg32;
u16 reg16;
- int capable = 1, enabled = 1;
- struct pci_dev *child_dev;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* All functions should have the same cap and state, take the worst */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
capable = 0;
enabled = 0;
break;
}
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
- link_state->clk_pm_enabled = enabled;
- link_state->bios_clk_state = enabled;
- if (!blacklist) {
- link_state->clk_pm_capable = capable;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
- } else {
- link_state->clk_pm_capable = 0;
- pcie_set_clock_pm(pdev, 0);
- }
+ link->clkpm_enabled = enabled;
+ link->clkpm_default = enabled;
+ link->clkpm_capable = (blacklist) ? 0 : capable;
}
-static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
{
- struct pci_dev *child_dev;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
return true;
}
return false;
@@ -184,289 +181,263 @@ static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
* could use common clock. If they are, configure them to use the
* common clock. That will reduce the ASPM state exit latency.
*/
-static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
+static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
- int pos, child_pos, i = 0;
- u16 reg16 = 0;
- struct pci_dev *child_dev;
- int same_clock = 1;
+ int ppos, cpos, same_clock = 1;
+ u16 reg16, parent_reg, child_reg[8];
unsigned long start_jiffies;
- u16 child_regs[8], parent_reg;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/*
- * all functions of a slot should have the same Slot Clock
+ * All functions of a slot should have the same Slot Clock
* Configuration, so just check one function
- * */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- BUG_ON(!child_dev->is_pcie);
+ */
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ BUG_ON(!child->is_pcie);
/* Check downstream component if bit Slot Clock Configuration is 1 */
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16);
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Configure downstream component, all functions */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- &reg16);
- child_regs[i] = reg16;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
+ child_reg[PCI_FUNC(child->devfn)] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- reg16);
- i++;
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
}
/* Configure upstream component */
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* retrain link */
+ /* Retrain link */
reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* Wait for link training end */
- /* break out after waiting for timeout */
+ /* Wait for link training end. Break out after waiting for timeout */
start_jiffies = jiffies;
for (;;) {
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
break;
msleep(1);
}
- /* training failed -> recover */
- if (reg16 & PCI_EXP_LNKSTA_LT) {
- dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
- " common clock\n");
- i = 0;
- list_for_each_entry(child_dev, &pdev->subordinate->devices,
- bus_list) {
- child_pos = pci_find_capability(child_dev,
- PCI_CAP_ID_EXP);
- pci_write_config_word(child_dev,
- child_pos + PCI_EXP_LNKCTL,
- child_regs[i]);
- i++;
- }
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ return;
+
+ /* Training failed. Restore common clock configurations */
+ dev_printk(KERN_ERR, &parent->dev,
+ "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
}
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
}
-/*
- * calc_L0S_latency: Convert L0s latency encoding to ns
- */
-static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
+/* Convert L0s latency encoding to ns */
+static u32 calc_l0s_latency(u32 encoding)
{
- unsigned int ns = 64;
+ if (encoding == 0x7)
+ return (5 * 1000); /* > 4us */
+ return (64 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 5*1000; /* > 4us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L0s acceptable latency encoding to ns */
+static u32 calc_l0s_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (64 << encoding);
}
-/*
- * calc_L1_latency: Convert L1 latency encoding to ns
- */
-static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
+/* Convert L1 latency encoding to ns */
+static u32 calc_l1_latency(u32 encoding)
{
- unsigned int ns = 1000;
+ if (encoding == 0x7)
+ return (65 * 1000); /* > 64us */
+ return (1000 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 65*1000; /* > 64us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L1 acceptable latency encoding to ns */
+static u32 calc_l1_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (1000 << encoding);
}
static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- unsigned int *l0s, unsigned int *l1, unsigned int *enabled)
+ u32 *l0s, u32 *l1, u32 *enabled)
{
int pos;
u16 reg16;
- u32 reg32;
- unsigned int latency;
+ u32 reg32, encoding;
+ *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
*state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S))
+ *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
*state = 0;
if (*state == 0)
return;
- latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_L0S_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ *l0s = calc_l0s_latency(encoding);
if (*state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_L1_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ *l1 = calc_l1_latency(encoding);
}
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1);
+ *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
-static void pcie_aspm_cap_init(struct pci_dev *pdev)
+static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- struct pci_dev *child_dev;
- u32 state, tmp;
- struct pcie_link_state *link_state = pdev->link_state;
+ u32 support, l0s, l1, enabled;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+
+ if (blacklist) {
+ /* Set support state to 0, so we will disable ASPM later */
+ link->aspm_support = 0;
+ link->aspm_default = 0;
+ link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return;
+ }
+
+ /* Configure common clock before checking latencies */
+ pcie_aspm_configure_common_clock(link);
/* upstream component states */
- pcie_aspm_get_cap_device(pdev, &link_state->support_state,
- &link_state->l0s_upper_latency,
- &link_state->l1_upper_latency,
- &link_state->enabled_state);
+ pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
+ link->aspm_support = support;
+ link->latency.l0s = l0s;
+ link->latency.l1 = l1;
+ link->aspm_enabled = enabled;
+
/* downstream component states, all functions have the same setting */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- pcie_aspm_get_cap_device(child_dev, &state,
- &link_state->l0s_down_latency,
- &link_state->l1_down_latency,
- &tmp);
- link_state->support_state &= state;
- if (!link_state->support_state)
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
+ link->aspm_support &= support;
+ link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
+ link->latency.l1 = max_t(u32, link->latency.l1, l1);
+
+ if (!link->aspm_support)
return;
- link_state->enabled_state &= link_state->support_state;
- link_state->bios_aspm_state = link_state->enabled_state;
+
+ link->aspm_enabled &= link->aspm_support;
+ link->aspm_default = link->aspm_enabled;
/* ENDPOINT states*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
- u32 reg32;
- unsigned int latency;
- struct endpoint_state *ep_state =
- &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
+ u32 reg32, encoding;
+ struct aspm_latency *acceptable =
+ &link->acceptable[PCI_FUNC(child->devfn)];
- if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)
+ if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END)
continue;
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32);
- latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- latency = calc_L0S_latency(latency, 1);
- ep_state->l0s_acceptable_latency = latency;
- if (link_state->support_state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- latency = calc_L1_latency(latency, 1);
- ep_state->l1_acceptable_latency = latency;
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable->l0s = calc_l0s_acceptable(encoding);
+ if (link->aspm_support & PCIE_LINK_STATE_L1) {
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
}
}
}
-static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pci_dev *parent_dev, *tmp_dev;
- unsigned int latency, l1_latency = 0;
- struct pcie_link_state *link_state;
- struct endpoint_state *ep_state;
-
- parent_dev = pdev->bus->self;
- link_state = parent_dev->link_state;
- state &= link_state->support_state;
- if (state == 0)
- return 0;
- ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)];
-
- /*
- * Check latency for endpoint device.
- * TBD: The latency from the endpoint to root complex vary per
- * switch's upstream link state above the device. Here we just do a
- * simple check which assumes all links above the device can be in L1
- * state, that is we just consider the worst case. If switch's upstream
- * link can't be put into L0S/L1, then our check is too strictly.
- */
- tmp_dev = pdev;
- while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
- parent_dev = tmp_dev->bus->self;
- link_state = parent_dev->link_state;
- if (state & PCIE_LINK_STATE_L0S) {
- latency = max_t(unsigned int,
- link_state->l0s_upper_latency,
- link_state->l0s_down_latency);
- if (latency > ep_state->l0s_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L0S;
- }
- if (state & PCIE_LINK_STATE_L1) {
- latency = max_t(unsigned int,
- link_state->l1_upper_latency,
- link_state->l1_down_latency);
- if (latency + l1_latency >
- ep_state->l1_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L1;
- }
- if (!parent_dev->bus->self) /* parent_dev is a root port */
- break;
- else {
- /*
- * parent_dev is the downstream port of a switch, make
- * tmp_dev the upstream port of the switch
- */
- tmp_dev = parent_dev->bus->self;
- /*
- * every switch on the path to root complex need 1 more
- * microsecond for L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- l1_latency += 1000;
- }
+/**
+ * __pcie_aspm_check_state_one - check latency for endpoint device.
+ * @endpoint: pointer to the struct pci_dev of the endpoint device
+ * @state: the ASPM states (PCIE_LINK_STATE_L0S/L1 mask) to check
+ *
+ * TBD: The latency from the endpoint to the root complex varies with the
+ * upstream link state of each switch above the device. Here we just do a
+ * simple check which assumes all links above the device can be in the L1
+ * state, that is, we only consider the worst case. If a switch's upstream
+ * link can't be put into L0s/L1, then our check is too strict.
+ */
+static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
+{
+ u32 l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
+
+ link = endpoint->bus->self->link_state;
+ state &= link->aspm_support;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ while (link && state) {
+ if ((state & PCIE_LINK_STATE_L0S) &&
+ (link->latency.l0s > acceptable->l0s))
+ state &= ~PCIE_LINK_STATE_L0S;
+ if ((state & PCIE_LINK_STATE_L1) &&
+ (link->latency.l1 + l1_switch_latency > acceptable->l1))
+ state &= ~PCIE_LINK_STATE_L1;
+ link = link->parent;
+ /*
+		 * Every switch on the path to the root complex needs 1
+		 * more microsecond for L1. The spec doesn't mention L0s.
+ */
+ l1_switch_latency += 1000;
}
return state;
}
-static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
- unsigned int state)
+static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
+ pci_power_t power_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* If no child, ignore the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
return state;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- * */
- state = 0;
- break;
- }
- if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END))
+
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ /*
+ * If downstream component of a link is pci bridge, we
+ * disable ASPM for now for the link
+ */
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END))
continue;
/* Device not in D0 doesn't need check latency */
- if (child_dev->current_state == PCI_D1 ||
- child_dev->current_state == PCI_D2 ||
- child_dev->current_state == PCI_D3hot ||
- child_dev->current_state == PCI_D3cold)
+ power_state = child->current_state;
+ if (power_state == PCI_D1 || power_state == PCI_D2 ||
+ power_state == PCI_D3hot || power_state == PCI_D3cold)
continue;
- state = __pcie_aspm_check_state_one(child_dev, state);
+ state = __pcie_aspm_check_state_one(child, state);
}
return state;
}
@@ -482,90 +453,71 @@ static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
-static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
+static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
- int valid = 1;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/* If no child, disable the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
state = 0;
/*
- * if the downstream component has pci bridge function, don't do ASPM
- * now
+	 * If the downstream component has a PCI bridge function, don't
+	 * do ASPM for now.
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- valid = 0;
- break;
- }
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return;
}
- if (!valid)
- return;
-
/*
- * spec 2.0 suggests all functions should be configured the same
- * setting for ASPM. Enabling ASPM L1 should be done in upstream
- * component first and then downstream, and vice versa for disabling
- * ASPM L1. Spec doesn't mention L0S.
+	 * Spec 2.0 suggests all functions should be configured with the
+	 * same ASPM setting. Enabling ASPM L1 should be done in the
+	 * upstream component first and then the downstream one, and vice
+	 * versa for disabling ASPM L1. The spec doesn't mention L0s.
*/
if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list)
- __pcie_aspm_config_one_dev(child_dev, state);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ __pcie_aspm_config_one_dev(child, state);
if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- link_state->enabled_state = state;
+ link->aspm_enabled = state;
}
-static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+/* Check the whole hierarchy, and configure each link in the hierarchy */
+static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
- struct pcie_link_state *root_port_link = link;
- while (root_port_link->parent)
- root_port_link = root_port_link->parent;
- return root_port_link;
-}
+ struct pcie_link_state *leaf, *root = link->root;
-/* check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pcie_link_state *link_state = pdev->link_state;
- struct pcie_link_state *root_port_link = get_root_port_link(link_state);
- struct pcie_link_state *leaf;
+ state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
- state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-
- /* check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (!list_empty(&leaf->children) ||
- get_root_port_link(leaf) != root_port_link)
+	/* Check all leaf links that share the given root port link */
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (!list_empty(&leaf->children) || (leaf->root != root))
continue;
- state = pcie_aspm_check_state(leaf->pdev, state);
+ state = pcie_aspm_check_state(leaf, state);
}
- /* check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root_port_link->pdev, state);
-
- if (link_state->enabled_state == state)
+	/* Check the root port link too in case it has no children */
+ state = pcie_aspm_check_state(root, state);
+ if (link->aspm_enabled == state)
return;
-
/*
- * we must change the hierarchy. See comments in
+ * We must change the hierarchy. See comments in
* __pcie_aspm_config_link for the order
**/
if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
} else {
- list_for_each_entry_reverse(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry_reverse(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
}
}
@@ -574,45 +526,42 @@ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
* pcie_aspm_configure_link_state: enable/disable PCI express link state
* @pdev: the root port or switch downstream port
*/
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
+static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(pdev, state);
+ __pcie_aspm_configure_link_state(link, state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
-static void free_link_state(struct pci_dev *pdev)
+static void free_link_state(struct pcie_link_state *link)
{
- kfree(pdev->link_state);
- pdev->link_state = NULL;
+ link->pdev->link_state = NULL;
+ kfree(link);
}
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
- struct pci_dev *child_dev;
- int child_pos;
+ struct pci_dev *child;
+ int pos;
u32 reg32;
-
/*
- * Some functions in a slot might not all be PCIE functions, very
- * strange. Disable ASPM for the whole slot
+ * Some functions in a slot might not all be PCIe functions, which is
+ * very strange. Disable ASPM for the whole slot
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!child_pos)
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ if (!pos)
return -EINVAL;
-
/*
 * Disable ASPM for pre-1.1 PCIe devices; we follow Microsoft in using
 * the RBER bit to determine whether a function is a 1.1-version device
*/
- pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
- &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
- dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
+ dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
" on pre-1.1 PCIe device. You can enable it"
" with 'pcie_aspm=force'\n");
return -EINVAL;
@@ -621,6 +570,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
return 0;
}
+static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+ if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ struct pcie_link_state *parent;
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
+ link->parent = parent;
+ list_add(&link->link, &parent->children);
+ }
+ /* Setup a pointer to the root port link */
+ if (!link->parent)
+ link->root = link;
+ else
+ link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+
+ pdev->link_state = link;
+
+ /* Check ASPM capability */
+ pcie_aspm_cap_init(link, blacklist);
+
+ /* Check Clock PM capability */
+ pcie_clkpm_cap_init(link, blacklist);
+
+ return link;
+}
+
/*
 * pcie_aspm_init_link_state: Initialize PCI Express link state.
 * It is called after the PCIe device and its child devices are scanned.
@@ -628,75 +618,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- unsigned int state;
- struct pcie_link_state *link_state;
- int error = 0;
- int blacklist;
+ u32 state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ /* VIA has a strange chipset: the root port is under a bridge */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->bus->self)
return;
+
down_read(&pci_bus_sem);
if (list_empty(&pdev->subordinate->devices))
goto out;
- blacklist = !!pcie_aspm_sanity_check(pdev);
-
mutex_lock(&aspm_lock);
-
- link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
- if (!link_state)
- goto unlock_out;
-
- link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
- INIT_LIST_HEAD(&link_state->children);
- INIT_LIST_HEAD(&link_state->link);
- if (pdev->bus->self) {/* this is a switch */
- struct pcie_link_state *parent_link_state;
-
- parent_link_state = pdev->bus->parent->self->link_state;
- if (!parent_link_state) {
- kfree(link_state);
- goto unlock_out;
- }
- list_add(&link_state->link, &parent_link_state->children);
- link_state->parent = parent_link_state;
- }
-
- pdev->link_state = link_state;
-
- if (!blacklist) {
- pcie_aspm_configure_common_clock(pdev);
- pcie_aspm_cap_init(pdev);
+ link = pcie_aspm_setup_link_state(pdev);
+ if (!link)
+ goto unlock;
+ /*
+ * Setup initial ASPM state
+ *
+ * If the link has a switch, delay the link config. The leaf link
+ * initialization will configure the whole hierarchy, but we must
+ * make sure the BIOS doesn't set an unsupported link state.
+ */
+ if (pcie_aspm_downstream_has_switch(link)) {
+ state = pcie_aspm_check_state(link, link->aspm_default);
+ __pcie_aspm_config_link(link, state);
} else {
- link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
- link_state->bios_aspm_state = 0;
- /* Set support state to 0, so we will disable ASPM later */
- link_state->support_state = 0;
+ state = policy_to_aspm_state(link);
+ __pcie_aspm_configure_link_state(link, state);
}
- link_state->pdev = pdev;
- list_add(&link_state->sibiling, &link_list);
-
- if (link_state->downstream_has_switch) {
- /*
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. but we must
- * make sure BIOS doesn't set unsupported link state
- **/
- state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
- __pcie_aspm_config_link(pdev, state);
- } else
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
-
- pcie_check_clock_pm(pdev, blacklist);
-
-unlock_out:
- if (error)
- free_link_state(pdev);
+ /* Setup initial Clock PM state */
+ state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
+ pcie_set_clkpm(link, state);
+unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
@@ -725,11 +687,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
/* All functions are removed, so just disable ASPM for the link */
__pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibiling);
+ list_del(&link_state->sibling);
list_del(&link_state->link);
/* Clock PM is for endpoint device */
- free_link_state(parent);
+ free_link_state(link_state);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
* devices changed PM state, we should recheck if latency meets all
* functions' requirement
*/
- pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
+ pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
}
/*
@@ -772,14 +734,12 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link_state = parent->link_state;
- link_state->support_state &=
- ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
- if (state & PCIE_LINK_STATE_CLKPM)
- link_state->clk_pm_capable = 0;
-
- __pcie_aspm_configure_link_state(parent, link_state->enabled_state);
- if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
- pcie_set_clock_pm(parent, 0);
+ link_state->aspm_support &= ~state;
+ __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ if (state & PCIE_LINK_STATE_CLKPM) {
+ link_state->clkpm_capable = 0;
+ pcie_set_clkpm(link_state, 0);
+ }
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
@@ -788,7 +748,6 @@ EXPORT_SYMBOL(pci_disable_link_state);
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pci_dev *pdev;
struct pcie_link_state *link_state;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibiling) {
- pdev = link_state->pdev;
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
- if (link_state->clk_pm_capable &&
- link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
+ list_for_each_entry(link_state, &link_list, sibling) {
+ __pcie_aspm_configure_link_state(link_state,
+ policy_to_aspm_state(link_state));
+ pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@ static ssize_t link_state_show(struct device *dev,
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->enabled_state);
+ return sprintf(buf, "%d\n", link_state->aspm_enabled);
}
static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@ static ssize_t link_state_store(struct device *dev,
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -854,7 +809,7 @@ static ssize_t link_state_store(struct device *dev,
state = buf[0]-'0';
if (state >= 0 && state <= 3) {
/* setup link aspm state */
- pcie_aspm_configure_link_state(pci_device, state);
+ pcie_aspm_configure_link_state(pdev->link_state, state);
return n;
}
@@ -868,7 +823,7 @@ static ssize_t clk_ctl_show(struct device *dev,
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
+ return sprintf(buf, "%d\n", link_state->clkpm_enabled);
}
static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@ static ssize_t clk_ctl_store(struct device *dev,
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -885,7 +840,7 @@ static ssize_t clk_ctl_store(struct device *dev,
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clock_pm(pci_device, !!state);
+ pcie_set_clkpm_nocheck(pdev->link_state, !!state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -904,10 +859,10 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
@@ -920,10 +875,10 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
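
The aspm.c rework above is easier to follow once you see what the new root field buys: the old code recomputed the root port link on every hierarchy check by walking parent pointers, while the new code caches that pointer once in pcie_aspm_setup_link_state() and reduces each check to a comparison. A minimal sketch, using only the struct fields introduced by this patch (surrounding kernel context assumed):

	/* Old form: walk up the parent chain on every call. */
	static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
	{
		while (link->parent)
			link = link->parent;
		return link;
	}

	/* New form: cache the root once at setup time... */
	link->root = link->parent ? link->parent->root : link;

	/* ...then hierarchy checks become a pointer comparison. */
	if (leaf->root == root)
		__pcie_aspm_config_link(leaf, state);
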
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f1ae2475ffff..40e75f6a5056 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
+ mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
dev_printk(KERN_DEBUG, &dev->dev,
"reg %x 64bit mmio: %pR\n", pos, res);
}
+
+ res->flags |= IORESOURCE_MEM_64;
} else {
sz = pci_size(l, sz, mask);
@@ -287,7 +289,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
struct resource *res;
int i;
- if (!child->parent) /* It's a host bus, nothing to read */
+ if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
if (dev->transparent) {
@@ -362,7 +364,10 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
}
}
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+ IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bd4253f93d5a..06b965623962 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
+/* ALi loses some register settings that we cannot then restore */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
+/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
+   occur during mode detection */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
/* This was originally an Alpha specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
@@ -1133,6 +1138,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
+ case 0x1897: /* A6L notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1163,6 +1169,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
+ case 0x006a: /* HP Compaq nx9500 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -2016,6 +2023,28 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
+/* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
+{
+ u8 reg;
+
+ if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
+ dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
+ pci_write_config_byte(dev, 0xF4, reg | 0x02);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
+ quirk_unhide_mch_dev6);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
+ quirk_unhide_mch_dev6);
+
+
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 86503c14ce7e..176615e7231f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@ static void pci_stop_dev(struct pci_dev *dev)
static void pci_destroy_dev(struct pci_dev *dev)
{
- pci_stop_dev(dev);
-
/* Remove the device from the device lists, and prevent any further
* list accesses from this device */
down_write(&pci_bus_sem);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 710d4ea69568..e8cb5051c311 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
if (pdev->is_pcie)
return NULL;
while (1) {
- if (!pdev->bus->parent)
+ if (pci_is_root_bus(pdev->bus))
break;
pdev = pdev->bus->self;
/* a p2p bridge */
@@ -115,36 +115,6 @@ pci_find_next_bus(const struct pci_bus *from)
#ifdef CONFIG_PCI_LEGACY
/**
- * pci_find_slot - locate PCI device from a given PCI slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
- *
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices. If the device
- * is found, a pointer to its data structure is returned. If no
- * device is found, %NULL is returned.
- *
- * NOTE: Do not use this function any more; use pci_get_slot() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (dev->bus->number == bus && dev->devfn == devfn) {
- pci_dev_put(dev);
- return dev;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(pci_find_slot);
-
-/**
* pci_find_device - begin or continue searching for a PCI device by vendor/device id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
* @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a00f85471b6e..b636e245445d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -58,7 +58,6 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
res = list->res;
idx = res - &list->dev->resource[0];
if (pci_assign_resource(list->dev, idx)) {
- /* FIXME: get rid of this */
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -143,6 +142,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
struct pci_dev *bridge = bus->self;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
+ int pref_mem64;
if (pci_is_enabled(bridge))
return;
@@ -198,16 +198,22 @@ static void pci_setup_bridge(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
/* Set up PREF base/limit. */
+ pref_mem64 = 0;
bu = lu = 0;
pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
+ int width = 8;
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- bu = upper_32_bits(region.start);
- lu = upper_32_bits(region.end);
- dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
- (unsigned long long)region.start,
- (unsigned long long)region.end);
+ if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
+ pref_mem64 = 1;
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ width = 16;
+ }
+ dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
+ width, (unsigned long long)region.start,
+ width, (unsigned long long)region.end);
}
else {
l = 0x0000fff0;
@@ -215,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus)
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ if (pref_mem64) {
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ }
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -255,8 +263,25 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
}
- if (pmem)
+ if (pmem) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+ b_res[2].flags |= IORESOURCE_MEM_64;
+ }
+
+ /* double check if bridge does support 64 bit pref */
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ u32 mem_base_hi, tmp;
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ &mem_base_hi);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ if (!tmp)
+ b_res[2].flags &= ~IORESOURCE_MEM_64;
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ mem_base_hi);
+ }
}
/* Helper function for sizing routines: find first available
@@ -336,6 +361,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
+ unsigned int mem64_mask = 0;
if (!b_res)
return 0;
@@ -344,9 +370,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
max_order = 0;
size = 0;
+ mem64_mask = b_res->flags & IORESOURCE_MEM_64;
+ b_res->flags &= ~IORESOURCE_MEM_64;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
-
+
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *r = &dev->resource[i];
resource_size_t r_size;
@@ -372,6 +401,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
aligns[order] += align;
if (order > max_order)
max_order = order;
+ mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
@@ -396,6 +426,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
b_res->start = min_align;
b_res->end = size + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
+ b_res->flags |= mem64_mask;
return 1;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 12403516776a..b711fb7181e2 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -135,23 +135,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
}
#endif /* CONFIG_PCI_QUIRKS */
-int pci_assign_resource(struct pci_dev *dev, int resno)
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno)
{
- struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
resource_size_t size, min, align;
int ret;
size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-
align = resource_alignment(res);
- if (!align) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
- "alignment) %pR flags %#lx\n",
- resno, res, res->flags);
- return -EINVAL;
- }
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -169,10 +162,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
pcibios_align_resource, dev);
}
- if (ret) {
- dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
- resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
- } else {
+ if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -181,6 +171,39 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret;
}
+int pci_assign_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t align;
+ struct pci_bus *bus;
+ int ret;
+
+ align = resource_alignment(res);
+ if (!align) {
+ dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
+ "alignment) %pR flags %#lx\n",
+ resno, res, res->flags);
+ return -EINVAL;
+ }
+
+ bus = dev->bus;
+ while ((ret = __pci_assign_resource(bus, dev, resno))) {
+ if (bus->parent && bus->self->transparent)
+ bus = bus->parent;
+ else
+ bus = NULL;
+ if (bus)
+ continue;
+ break;
+ }
+
+ if (ret)
+ dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+ resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+
+ return ret;
+}
+
#if 0
int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
{
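
The retry loop added to pci_assign_resource() above is terse; an equivalent sketch of its control flow, written out for clarity (same helpers as in this patch, behaviour unchanged):

	bus = dev->bus;
	for (;;) {
		ret = __pci_assign_resource(bus, dev, resno);
		if (!ret)
			break;		/* allocation succeeded on this bus */
		if (!bus->parent || !bus->self->transparent)
			break;		/* cannot climb past a non-transparent bridge */
		bus = bus->parent;	/* retry the allocation one level up */
	}
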
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index fe95ce20bcbd..8c02b6c53bdb 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -307,6 +307,45 @@ void pci_destroy_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+#include <linux/pci_hotplug.h>
+/**
+ * pci_hp_create_module_link - create symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot to create the link for
+ *
+ * Helper function for pci_hotplug_core.c to create symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_create_module_link(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ struct kobject *kobj = NULL;
+ int no_warn;
+
+ if (!slot || !slot->ops)
+ return;
+ kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ if (!kobj)
+ return;
+ no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
+
+/**
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot to remove the link from
+ *
+ * Helper function for pci_hotplug_core.c to remove symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_remove_module_link(struct pci_slot *pci_slot)
+{
+ sysfs_remove_link(&pci_slot->kobj, "module");
+}
+EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
+#endif
+
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
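
The two helpers exported above are written for pci_hotplug_core.c to pair around slot registration; the sketch below is purely hypothetical (illustrative function names, not code from this patch) and only shows the intended calling pattern:

	/* Hypothetical caller - the real call sites live in
	 * drivers/pci/hotplug/pci_hotplug_core.c. */
	static void example_slot_registered(struct pci_slot *pci_slot,
					    struct hotplug_slot *slot)
	{
		pci_slot->hotplug = slot;
		/* adds a "module" symlink in the slot's sysfs directory */
		pci_hp_create_module_link(pci_slot);
	}

	static void example_slot_deregistered(struct pci_slot *pci_slot)
	{
		pci_hp_remove_module_link(pci_slot);
	}
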
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 9ad97ea836e8..8eb04230fec7 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -472,7 +472,8 @@ static int __init init_tcic(void)
init_timer(&poll_timer);
/* Build interrupt mask */
- printk(", %d sockets\n" KERN_INFO " irq list (", sockets);
+ printk(KERN_CONT ", %d sockets\n", sockets);
+ printk(KERN_INFO " irq list (");
if (irq_list_count == 0)
mask = irq_mask;
else
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 659421d0ca46..d4ad50d737b0 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -1,7 +1,7 @@
/*
* vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services.
*
- * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,7 +32,7 @@
#include "i82365.h"
MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_LICENSE("GPL");
#define CARD_MAX_SLOTS 2
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index 812f038e9bda..9b3c15827e5c 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -6,7 +6,7 @@
* NEC VRC4173 CARDU driver for Socket Services
 * (This device doesn't support CardBus. It supports only 16-bit PC Cards.)
*
- * Copyright 2002,2003 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright 2002,2003 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -41,7 +41,7 @@
#include "vrc4173_cardu.h"
MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_LICENSE("GPL");
static int vrc4173_cardu_slots;
diff --git a/drivers/pcmcia/vrc4173_cardu.h b/drivers/pcmcia/vrc4173_cardu.h
index 7d77c74120c1..a7d96018ed8d 100644
--- a/drivers/pcmcia/vrc4173_cardu.h
+++ b/drivers/pcmcia/vrc4173_cardu.h
@@ -5,7 +5,7 @@
* BRIEF MODULE DESCRIPTION
* Include file for NEC VRC4173 CARDU.
*
- * Copyright 2002 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright 2002 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c682ac536415..46dad12f952f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -34,10 +34,27 @@ config ACER_WMI
If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
here.
+config ACERHDF
+ tristate "Acer Aspire One temperature and fan driver"
+ depends on THERMAL && THERMAL_HWMON && ACPI
+ ---help---
+ This is a driver for Acer Aspire One netbooks. It provides access
+ to the temperature sensor and allows the fan to be controlled.
+
+ After loading this driver the BIOS is still in control of the fan.
+ To let the kernel handle the fan, do:
+ echo -n enabled > /sys/class/thermal/thermal_zone0/mode
+
+ For more information about this driver see
+ <http://piie.net/files/acerhdf_README.txt>
+
+ If you have an Acer Aspire One netbook, say Y or M
+ here.
+
config ASUS_LAPTOP
- tristate "Asus Laptop Extras (EXPERIMENTAL)"
+ tristate "Asus Laptop Extras"
depends on ACPI
- depends on EXPERIMENTAL && !ACPI_ASUS
+ depends on !ACPI_ASUS
select LEDS_CLASS
select NEW_LEDS
select BACKLIGHT_CLASS_DEVICE
@@ -45,12 +62,12 @@ config ASUS_LAPTOP
---help---
This is the new Linux driver for Asus laptops. It may also support some
MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
- standard ACPI events that go through /proc/acpi/events. It also adds
+ standard ACPI events and input events. It also adds
support for video output switching, LCD backlight control, Bluetooth and
Wlan control, and most importantly, allows you to blink those fancy LEDs.
For more information and a userspace daemon for handling the extra
- buttons see <http://acpi4asus.sf.net/>.
+ buttons see <http://acpi4asus.sf.net>.
If you have an ACPI-compatible ASUS laptop, say Y or M here.
@@ -338,11 +355,15 @@ config EEEPC_LAPTOP
depends on INPUT
depends on EXPERIMENTAL
depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
select BACKLIGHT_CLASS_DEVICE
select HWMON
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
- It also adds the ability to switch camera/wlan on/off.
+
+ It also gives access to some extra laptop functionality, such as
+ Bluetooth and backlight control, and allows powering some other
+ devices on and off.
If you have an Eee PC laptop, say Y or M here.
@@ -369,7 +390,7 @@ config ACPI_WMI
any ACPI-WMI devices.
config ACPI_ASUS
- tristate "ASUS/Medion Laptop Extras"
+ tristate "ASUS/Medion Laptop Extras (DEPRECATED)"
depends on ACPI
select BACKLIGHT_CLASS_DEVICE
---help---
@@ -390,7 +411,7 @@ config ACPI_ASUS
parameters.
More information and a userspace daemon for handling the extra buttons
- at <http://sourceforge.net/projects/acpi4asus/>.
+ at <http://acpi4asus.sf.net>.
If you have an ACPI-compatible ASUS laptop, say Y or M here. This
driver is still under development, so if your laptop is unsupported or
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index e40c7bd1b87e..641b8bfa5538 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
+obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 09a503e5da6a..be2fd6f91639 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -958,12 +958,12 @@ static void acer_rfkill_update(struct work_struct *ignored)
status = get_u32(&state, ACER_CAP_WIRELESS);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(wireless_rfkill, !!state);
+ rfkill_set_sw_state(wireless_rfkill, !state);
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(bluetooth_rfkill, !!state);
+ rfkill_set_sw_state(bluetooth_rfkill, !state);
}
schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
new file mode 100644
index 000000000000..bdfee177eefb
--- /dev/null
+++ b/drivers/platform/x86/acerhdf.c
@@ -0,0 +1,602 @@
+/*
+ * acerhdf - A driver which monitors the temperature
+ * of the Aspire One netbook and turns the fan on/off
+ * as soon as the upper/lower threshold is reached.
+ *
+ * (C) 2009 - Peter Feuerer peter (a) piie.net
+ * http://piie.net
+ * 2009 Borislav Petkov <petkovbb@gmail.com>
+ *
+ * Inspired by and many thanks to:
+ * o acerfand - Rachel Greenham
+ * o acer_ec.pl - Michael Kurz michi.kurz (at) googlemail.com
+ * - Petr Tomasek tomasek (#) etf,cuni,cz
+ * - Carlos Corbacho cathectic (at) gmail.com
+ * o lkml - Matthew Garrett
+ * - Borislav Petkov
+ * - Andreas Mohr
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) "acerhdf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/dmi.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/sched.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/*
+ * The driver is started with "kernel mode off" by default. That means the
+ * BIOS is still in control of the fan. In this mode the driver allows reading
+ * the CPU temperature, and a userspace tool may take over control of the fan.
+ * If the driver is switched to "kernel mode" (e.g. via module parameter) the
+ * driver is in full control of the fan. If you want the module to be started in
+ * kernel mode by default, define the following:
+ */
+#undef START_IN_KERNEL_MODE
+
+#define DRV_VER "0.5.13"
+
+/*
+ * According to the Atom N270 datasheet,
+ * (http://download.intel.com/design/processor/datashts/320032.pdf) the
+ * CPU's optimal operating limits denoted in junction temperature as
+ * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
+ * assume 89°C is the critical temperature.
+ */
+#define ACERHDF_TEMP_CRIT 89
+#define ACERHDF_FAN_OFF 0
+#define ACERHDF_FAN_AUTO 1
+
+/*
+ * No matter what value the user puts into the fanon variable, turn on the fan
+ * at 80 degrees Celsius to prevent hardware damage
+ */
+#define ACERHDF_MAX_FANON 80
+
+/*
+ * Maximum interval between two temperature checks is 15 seconds, as the die
+ * can get hot really fast under heavy load (plus we shouldn't forget about
+ * possible impact of _external_ aggressive sources such as heaters, sun etc.)
+ */
+#define ACERHDF_MAX_INTERVAL 15
+
+#ifdef START_IN_KERNEL_MODE
+static int kernelmode = 1;
+#else
+static int kernelmode;
+#endif
+
+static unsigned int interval = 10;
+static unsigned int fanon = 63;
+static unsigned int fanoff = 58;
+static unsigned int verbose;
+static unsigned int fanstate = ACERHDF_FAN_AUTO;
+static char force_bios[16];
+static unsigned int prev_interval;
+struct thermal_zone_device *thz_dev;
+struct thermal_cooling_device *cl_dev;
+struct platform_device *acerhdf_dev;
+
+module_param(kernelmode, uint, 0);
+MODULE_PARM_DESC(kernelmode, "Kernel mode fan control on / off");
+module_param(interval, uint, 0600);
+MODULE_PARM_DESC(interval, "Polling interval of temperature check");
+module_param(fanon, uint, 0600);
+MODULE_PARM_DESC(fanon, "Turn the fan on above this temperature");
+module_param(fanoff, uint, 0600);
+MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature");
+module_param(verbose, uint, 0600);
+MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+module_param_string(force_bios, force_bios, 16, 0);
+MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check");
+
+/* BIOS settings */
+struct bios_settings_t {
+ const char *vendor;
+ const char *version;
+ unsigned char fanreg;
+ unsigned char tempreg;
+ unsigned char fancmd[2]; /* fan off and auto commands */
+};
+
+/* Register addresses and values for different BIOS versions */
+static const struct bios_settings_t bios_tbl[] = {
+ {"Acer", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ {"Gateway", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
+ {"", "", 0, 0, {0, 0} }
+};
+
+static const struct bios_settings_t *bios_cfg __read_mostly;
+
+
+static int acerhdf_get_temp(int *temp)
+{
+ u8 read_temp;
+
+ if (ec_read(bios_cfg->tempreg, &read_temp))
+ return -EINVAL;
+
+ *temp = read_temp;
+
+ return 0;
+}
+
+static int acerhdf_get_fanstate(int *state)
+{
+ u8 fan;
+ bool tmp;
+
+ if (ec_read(bios_cfg->fanreg, &fan))
+ return -EINVAL;
+
+ tmp = (fan == bios_cfg->fancmd[ACERHDF_FAN_OFF]);
+ *state = tmp ? ACERHDF_FAN_OFF : ACERHDF_FAN_AUTO;
+
+ return 0;
+}
+
+static void acerhdf_change_fanstate(int state)
+{
+ unsigned char cmd;
+
+ if (verbose)
+ pr_notice("fan %s\n", (state == ACERHDF_FAN_OFF) ?
+ "OFF" : "ON");
+
+ if ((state != ACERHDF_FAN_OFF) && (state != ACERHDF_FAN_AUTO)) {
+ pr_err("invalid fan state %d requested, setting to auto!\n",
+ state);
+ state = ACERHDF_FAN_AUTO;
+ }
+
+ cmd = bios_cfg->fancmd[state];
+ fanstate = state;
+
+ ec_write(bios_cfg->fanreg, cmd);
+}
+
+static void acerhdf_check_param(struct thermal_zone_device *thermal)
+{
+ if (fanon > ACERHDF_MAX_FANON) {
+ pr_err("fanon temperature too high, set to %d\n",
+ ACERHDF_MAX_FANON);
+ fanon = ACERHDF_MAX_FANON;
+ }
+
+ if (kernelmode && prev_interval != interval) {
+ if (interval > ACERHDF_MAX_INTERVAL) {
+ pr_err("interval too high, set to %d\n",
+ ACERHDF_MAX_INTERVAL);
+ interval = ACERHDF_MAX_INTERVAL;
+ }
+ if (verbose)
+ pr_notice("interval changed to: %d\n",
+ interval);
+ thermal->polling_delay = interval*1000;
+ prev_interval = interval;
+ }
+}
+
+/*
+ * This is the thermal zone callback which does the delayed polling of the fan
+ * state. We do check /sysfs-originating settings here in acerhdf_check_param()
+ * as late as the polling interval is since we can't do that in the respective
+ * accessors of the module parameters.
+ */
+static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal,
+ unsigned long *t)
+{
+ int temp, err = 0;
+
+ acerhdf_check_param(thermal);
+
+ err = acerhdf_get_temp(&temp);
+ if (err)
+ return err;
+
+ if (verbose)
+ pr_notice("temp %d\n", temp);
+
+ *t = temp;
+ return 0;
+}
+
+static int acerhdf_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ /* if the cooling device is the one from acerhdf, bind it */
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) {
+ pr_err("error binding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int acerhdf_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) {
+ pr_err("error unbinding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void acerhdf_revert_to_bios_mode(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ kernelmode = 0;
+ if (thz_dev)
+ thz_dev->polling_delay = 0;
+ pr_notice("kernel mode fan control OFF\n");
+}
+static inline void acerhdf_enable_kernelmode(void)
+{
+ kernelmode = 1;
+
+ thz_dev->polling_delay = interval*1000;
+ thermal_zone_device_update(thz_dev);
+ pr_notice("kernel mode fan control ON\n");
+}
+
+static int acerhdf_get_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode *mode)
+{
+ if (verbose)
+ pr_notice("kernel mode fan control %d\n", kernelmode);
+
+ *mode = (kernelmode) ? THERMAL_DEVICE_ENABLED
+ : THERMAL_DEVICE_DISABLED;
+
+ return 0;
+}
+
+/*
+ * set operation mode;
+ * enabled: the thermal layer of the kernel takes care of
+ * the temperature and the fan.
+ * disabled: the BIOS takes control of the fan.
+ */
+static int acerhdf_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+{
+ if (mode == THERMAL_DEVICE_DISABLED && kernelmode)
+ acerhdf_revert_to_bios_mode();
+ else if (mode == THERMAL_DEVICE_ENABLED && !kernelmode)
+ acerhdf_enable_kernelmode();
+
+ return 0;
+}
+
+static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
+ enum thermal_trip_type *type)
+{
+ if (trip == 0)
+ *type = THERMAL_TRIP_ACTIVE;
+
+ return 0;
+}
+
+static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
+ unsigned long *temp)
+{
+ if (trip == 0)
+ *temp = fanon;
+
+ return 0;
+}
+
+static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
+ unsigned long *temperature)
+{
+ *temperature = ACERHDF_TEMP_CRIT;
+ return 0;
+}
+
+/* bind callback functions to thermalzone */
+struct thermal_zone_device_ops acerhdf_dev_ops = {
+ .bind = acerhdf_bind,
+ .unbind = acerhdf_unbind,
+ .get_temp = acerhdf_get_ec_temp,
+ .get_mode = acerhdf_get_mode,
+ .set_mode = acerhdf_set_mode,
+ .get_trip_type = acerhdf_get_trip_type,
+ .get_trip_temp = acerhdf_get_trip_temp,
+ .get_crit_temp = acerhdf_get_crit_temp,
+};
+
+
+/*
+ * cooling device callback functions
+ * get maximal fan cooling state
+ */
+static int acerhdf_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 1;
+
+ return 0;
+}
+
+static int acerhdf_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ int err = 0, tmp;
+
+ err = acerhdf_get_fanstate(&tmp);
+ if (err)
+ return err;
+
+ *state = (tmp == ACERHDF_FAN_AUTO) ? 1 : 0;
+ return 0;
+}
+
+/* change current fan state - is overwritten when running in kernel mode */
+static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ int cur_temp, cur_state, err = 0;
+
+ if (!kernelmode)
+ return 0;
+
+ err = acerhdf_get_temp(&cur_temp);
+ if (err) {
+ pr_err("error reading temperature, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ err = acerhdf_get_fanstate(&cur_state);
+ if (err) {
+ pr_err("error reading fan state, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ if (state == 0) {
+ /* turn fan off only if below fanoff temperature */
+ if ((cur_state == ACERHDF_FAN_AUTO) &&
+ (cur_temp < fanoff))
+ acerhdf_change_fanstate(ACERHDF_FAN_OFF);
+ } else {
+ if (cur_state == ACERHDF_FAN_OFF)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ }
+ return 0;
+
+err_out:
+ acerhdf_revert_to_bios_mode();
+ return -EINVAL;
+}
+
+/* bind fan callbacks to fan device */
+struct thermal_cooling_device_ops acerhdf_cooling_ops = {
+ .get_max_state = acerhdf_get_max_state,
+ .get_cur_state = acerhdf_get_cur_state,
+ .set_cur_state = acerhdf_set_cur_state,
+};
+
+/* suspend / resume functionality */
+static int acerhdf_suspend(struct platform_device *dev, pm_message_t state)
+{
+ if (kernelmode)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+
+ if (verbose)
+ pr_notice("going suspend\n");
+
+ return 0;
+}
+
+static int acerhdf_resume(struct platform_device *device)
+{
+ if (verbose)
+ pr_notice("resuming\n");
+
+ return 0;
+}
+
+static int __devinit acerhdf_probe(struct platform_device *device)
+{
+ return 0;
+}
+
+static int acerhdf_remove(struct platform_device *device)
+{
+ return 0;
+}
+
+struct platform_driver acerhdf_drv = {
+ .driver = {
+ .name = "acerhdf",
+ .owner = THIS_MODULE,
+ },
+ .probe = acerhdf_probe,
+ .remove = acerhdf_remove,
+ .suspend = acerhdf_suspend,
+ .resume = acerhdf_resume,
+};
+
+
+/* check hardware */
+static int acerhdf_check_hardware(void)
+{
+ char const *vendor, *version, *product;
+ int i;
+
+ /* get BIOS data */
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ version = dmi_get_system_info(DMI_BIOS_VERSION);
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER);
+
+ if (!force_bios[0]) {
+ if (strncmp(product, "AO", 2)) {
+ pr_err("no Aspire One hardware found\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_info("forcing BIOS version: %s\n", version);
+ version = force_bios;
+ kernelmode = 0;
+ }
+
+ if (verbose)
+ pr_info("BIOS info: %s %s, product: %s\n",
+ vendor, version, product);
+
+ /* search BIOS version and vendor in BIOS settings table */
+ for (i = 0; bios_tbl[i].version[0]; i++) {
+ if (!strcmp(bios_tbl[i].vendor, vendor) &&
+ !strcmp(bios_tbl[i].version, version)) {
+ bios_cfg = &bios_tbl[i];
+ break;
+ }
+ }
+
+ if (!bios_cfg) {
+ pr_err("unknown (unsupported) BIOS version %s/%s, "
+ "please report, aborting!\n", vendor, version);
+ return -EINVAL;
+ }
+
+ /*
+ * if started with kernel mode off, prevent the kernel from switching
+ * off the fan
+ */
+ if (!kernelmode) {
+ pr_notice("Fan control off, to enable do:\n");
+ pr_notice("echo -n \"enabled\" > "
+ "/sys/class/thermal/thermal_zone0/mode\n");
+ }
+
+ return 0;
+}
+
+static int acerhdf_register_platform(void)
+{
+ int err = 0;
+
+ err = platform_driver_register(&acerhdf_drv);
+ if (err)
+ return err;
+
+ acerhdf_dev = platform_device_alloc("acerhdf", -1);
+ platform_device_add(acerhdf_dev);
+
+ return 0;
+}
+
+static void acerhdf_unregister_platform(void)
+{
+ if (!acerhdf_dev)
+ return;
+
+ platform_device_del(acerhdf_dev);
+ platform_driver_unregister(&acerhdf_drv);
+}
+
+static int acerhdf_register_thermal(void)
+{
+ cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL,
+ &acerhdf_cooling_ops);
+
+ if (IS_ERR(cl_dev))
+ return -EINVAL;
+
+ thz_dev = thermal_zone_device_register("acerhdf", 1, NULL,
+ &acerhdf_dev_ops, 0, 0, 0,
+ (kernelmode) ? interval*1000 : 0);
+ if (IS_ERR(thz_dev))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void acerhdf_unregister_thermal(void)
+{
+ if (cl_dev) {
+ thermal_cooling_device_unregister(cl_dev);
+ cl_dev = NULL;
+ }
+
+ if (thz_dev) {
+ thermal_zone_device_unregister(thz_dev);
+ thz_dev = NULL;
+ }
+}
+
+static int __init acerhdf_init(void)
+{
+ int err = 0;
+
+ err = acerhdf_check_hardware();
+ if (err)
+ goto out_err;
+
+ err = acerhdf_register_platform();
+ if (err)
+ goto err_unreg;
+
+ err = acerhdf_register_thermal();
+ if (err)
+ goto err_unreg;
+
+ return 0;
+
+err_unreg:
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+
+out_err:
+ return -ENODEV;
+}
+
+static void __exit acerhdf_exit(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Feuerer");
+MODULE_DESCRIPTION("Aspire One temperature and fan driver");
+MODULE_ALIAS("dmi:*:*Acer*:*:");
+MODULE_ALIAS("dmi:*:*Gateway*:*:");
+MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
+
+module_init(acerhdf_init);
+module_exit(acerhdf_exit);
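
Most of acerhdf.c above reduces to two EC registers per BIOS version, as listed in bios_tbl; the following is a condensed sketch of the fan-control decision (not extra driver code), assuming bios_cfg has already matched the running BIOS and using the helpers and thresholds defined in the new file:

	u8 temp;

	/* tempreg yields the die temperature in the same unit as the
	 * fanon/fanoff thresholds; fanreg accepts fancmd[ACERHDF_FAN_OFF]
	 * or fancmd[ACERHDF_FAN_AUTO]. ec_read()/ec_write() return 0 on
	 * success, as used by the driver itself. */
	if (!ec_read(bios_cfg->tempreg, &temp)) {
		if (temp >= fanon)
			ec_write(bios_cfg->fanreg,
				 bios_cfg->fancmd[ACERHDF_FAN_AUTO]);
		else if (temp < fanoff)
			ec_write(bios_cfg->fanreg,
				 bios_cfg->fancmd[ACERHDF_FAN_OFF]);
	}
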
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index bfc1a8892a32..db657bbeec90 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -33,6 +33,8 @@
* Sam Lin - GPS support
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -53,9 +55,10 @@
#define ASUS_HOTK_NAME "Asus Laptop Support"
#define ASUS_HOTK_CLASS "hotkey"
#define ASUS_HOTK_DEVICE_NAME "Hotkey"
-#define ASUS_HOTK_FILE "asus-laptop"
+#define ASUS_HOTK_FILE KBUILD_MODNAME
#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
+
/*
* Some events we use, same for all Asus
*/
@@ -207,13 +210,17 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids);
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
+static void asus_hotk_notify(struct acpi_device *device, u32 event);
+
static struct acpi_driver asus_hotk_driver = {
.name = ASUS_HOTK_NAME,
.class = ASUS_HOTK_CLASS,
.ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
+ .notify = asus_hotk_notify,
},
};
@@ -323,7 +330,7 @@ static int read_wireless_status(int mask)
rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading Wireless status\n");
+ pr_warning("Error reading Wireless status\n");
else
return (status & mask) ? 1 : 0;
@@ -337,7 +344,7 @@ static int read_gps_status(void)
rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading GPS status\n");
+ pr_warning("Error reading GPS status\n");
else
return status ? 1 : 0;
@@ -377,7 +384,7 @@ static void write_status(acpi_handle handle, int out, int mask)
}
if (write_acpi_int(handle, NULL, out, NULL))
- printk(ASUS_WARNING " write failed %x\n", mask);
+ pr_warning(" write failed %x\n", mask);
}
/* /sys/class/led handlers */
@@ -420,7 +427,7 @@ static int set_lcd_state(int value)
NULL, NULL, NULL);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Error switching LCD\n");
+ pr_warning("Error switching LCD\n");
}
write_status(NULL, lcd, LCD_ON);
@@ -444,7 +451,7 @@ static int read_brightness(struct backlight_device *bd)
rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading brightness\n");
+ pr_warning("Error reading brightness\n");
return value;
}
@@ -457,7 +464,7 @@ static int set_brightness(struct backlight_device *bd, int value)
/* 0 <= value <= 15 */
if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
- printk(ASUS_WARNING "Error changing brightness\n");
+ pr_warning("Error changing brightness\n");
ret = -EIO;
}
@@ -587,7 +594,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
rv = parse_arg(buf, count, &value);
if (rv > 0) {
if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
- printk(ASUS_WARNING "LED display write failed\n");
+ pr_warning("LED display write failed\n");
else
hotk->ledd_status = (u32) value;
}
@@ -632,7 +639,7 @@ static void set_display(int value)
{
/* no sanity check needed for now */
if (write_acpi_int(display_set_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting display\n");
+ pr_warning("Error setting display\n");
return;
}
@@ -647,7 +654,7 @@ static int read_display(void)
rv = acpi_evaluate_integer(display_get_handle, NULL,
NULL, &value);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading display status\n");
+ pr_warning("Error reading display status\n");
}
value &= 0x0F; /* needed for some models, shouldn't hurt others */
@@ -689,7 +696,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
static void set_light_sens_switch(int value)
{
if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting light sensor switch\n");
+ pr_warning("Error setting light sensor switch\n");
hotk->light_switch = value;
}
@@ -714,7 +721,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
static void set_light_sens_level(int value)
{
if (write_acpi_int(ls_level_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting light sensor level\n");
+ pr_warning("Error setting light sensor level\n");
hotk->light_level = value;
}
@@ -812,7 +819,7 @@ static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode)
return -EINVAL;
}
-static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void asus_hotk_notify(struct acpi_device *device, u32 event)
{
static struct key_entry *key;
u16 count;
@@ -975,11 +982,11 @@ static int asus_hotk_get_info(void)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
+ pr_warning("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
- printk(ASUS_ERR "Hotkey initialization failed\n");
+ pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
@@ -987,9 +994,9 @@ static int asus_hotk_get_info(void)
status =
acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Error calling BSTS\n");
+ pr_warning("Error calling BSTS\n");
else if (bsts_result)
- printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n",
+ pr_notice("BSTS called, 0x%02x returned\n",
(uint) bsts_result);
/* This too ... */
@@ -1020,7 +1027,7 @@ static int asus_hotk_get_info(void)
return -ENOMEM;
if (*string)
- printk(ASUS_NOTICE " %s model detected\n", string);
+ pr_notice(" %s model detected\n", string);
ASUS_HANDLE_INIT(mled_set);
ASUS_HANDLE_INIT(tled_set);
@@ -1077,7 +1084,7 @@ static int asus_input_init(void)
hotk->inputdev = input_allocate_device();
if (!hotk->inputdev) {
- printk(ASUS_INFO "Unable to allocate input device\n");
+ pr_info("Unable to allocate input device\n");
return 0;
}
hotk->inputdev->name = "Asus Laptop extra buttons";
@@ -1096,7 +1103,7 @@ static int asus_input_init(void)
}
result = input_register_device(hotk->inputdev);
if (result) {
- printk(ASUS_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
input_free_device(hotk->inputdev);
}
return result;
@@ -1113,7 +1120,7 @@ static int asus_hotk_check(void)
if (hotk->device->status.present) {
result = asus_hotk_get_info();
} else {
- printk(ASUS_ERR "Hotkey device not present, aborting\n");
+ pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
}
@@ -1124,13 +1131,12 @@ static int asus_hotk_found;
static int asus_hotk_add(struct acpi_device *device)
{
- acpi_status status = AE_OK;
int result;
if (!device)
return -EINVAL;
- printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
+ pr_notice("Asus Laptop Support version %s\n",
ASUS_LAPTOP_VERSION);
hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
@@ -1149,15 +1155,6 @@ static int asus_hotk_add(struct acpi_device *device)
asus_hotk_add_fs();
- /*
- * We install the handler, it will receive the hotk in parameter, so, we
- * could add other data to the hotk struct
- */
- status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
- asus_hotk_notify, hotk);
- if (ACPI_FAILURE(status))
- printk(ASUS_ERR "Error installing notify handler\n");
-
asus_hotk_found = 1;
/* WLED and BLED are on by default */
@@ -1198,16 +1195,9 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
- asus_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(ASUS_ERR "Error removing notify handler\n");
-
kfree(hotk->name);
kfree(hotk);
@@ -1260,8 +1250,7 @@ static int asus_backlight_init(struct device *dev)
bd = backlight_device_register(ASUS_HOTK_FILE, dev,
NULL, &asusbl_ops);
if (IS_ERR(bd)) {
- printk(ASUS_ERR
- "Could not register asus backlight device\n");
+ pr_err("Could not register asus backlight device\n");
asus_backlight_device = NULL;
return PTR_ERR(bd);
}
@@ -1334,7 +1323,6 @@ out:
static int __init asus_laptop_init(void)
{
- struct device *dev;
int result;
if (acpi_disabled)
@@ -1356,24 +1344,10 @@ static int __init asus_laptop_init(void)
return -ENODEV;
}
- dev = acpi_get_physical_device(hotk->device->handle);
-
- if (!acpi_video_backlight_support()) {
- result = asus_backlight_init(dev);
- if (result)
- goto fail_backlight;
- } else
- printk(ASUS_INFO "Brightness ignored, must be controlled by "
- "ACPI video driver\n");
-
result = asus_input_init();
if (result)
goto fail_input;
- result = asus_led_init(dev);
- if (result)
- goto fail_led;
-
/* Register platform stuff */
result = platform_driver_register(&asuspf_driver);
if (result)
@@ -1394,8 +1368,27 @@ static int __init asus_laptop_init(void)
if (result)
goto fail_sysfs;
+ result = asus_led_init(&asuspf_device->dev);
+ if (result)
+ goto fail_led;
+
+ if (!acpi_video_backlight_support()) {
+ result = asus_backlight_init(&asuspf_device->dev);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Brightness ignored, must be controlled by "
+ "ACPI video driver\n");
+
return 0;
+fail_backlight:
+ asus_led_exit();
+
+fail_led:
+ sysfs_remove_group(&asuspf_device->dev.kobj,
+ &asuspf_attribute_group);
+
fail_sysfs:
platform_device_del(asuspf_device);
@@ -1406,15 +1399,9 @@ fail_platform_device1:
platform_driver_unregister(&asuspf_driver);
fail_platform_driver:
- asus_led_exit();
-
-fail_led:
asus_input_exit();
fail_input:
- asus_backlight_exit();
-
-fail_backlight:
return result;
}
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ba1f7497e4b9..ddf5240ade8c 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -455,6 +455,8 @@ static struct asus_hotk *hotk;
*/
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
+static void asus_hotk_notify(struct acpi_device *device, u32 event);
+
static const struct acpi_device_id asus_device_ids[] = {
{"ATK0100", 0},
{"", 0},
@@ -465,9 +467,11 @@ static struct acpi_driver asus_hotk_driver = {
.name = "asus_acpi",
.class = ACPI_HOTK_CLASS,
.ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
+ .notify = asus_hotk_notify,
},
};
@@ -1101,12 +1105,20 @@ static int asus_hotk_remove_fs(struct acpi_device *device)
return 0;
}
-static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void asus_hotk_notify(struct acpi_device *device, u32 event)
{
/* TODO Find a better way to handle events count. */
if (!hotk)
return;
+ /*
+ * The BIOS *should* be sending us device events, but apparently
+ * Asus uses system events instead, so just ignore any device
+ * events we get.
+ */
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
+
if ((event & ~((u32) BR_UP)) < 16)
hotk->brightness = (event & ~((u32) BR_UP));
else if ((event & ~((u32) BR_DOWN)) < 16)
@@ -1346,15 +1358,6 @@ static int asus_hotk_add(struct acpi_device *device)
if (result)
goto end;
- /*
- * We install the handler, it will receive the hotk in parameter, so, we
- * could add other data to the hotk struct
- */
- status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
- asus_hotk_notify, hotk);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR " Error installing notify handler\n");
-
/* For laptops without GPLV: init the hotk->brightness value */
if ((!hotk->methods->brightness_get)
&& (!hotk->methods->brightness_status)
@@ -1389,16 +1392,9 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
- asus_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR "Asus ACPI: Error removing notify handler\n");
-
asus_hotk_remove_fs(device);
kfree(hotk);
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 2fab94162147..0f900cc9fa7a 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -46,10 +46,53 @@ struct key_entry {
u16 keycode;
};
-enum { KE_KEY, KE_SW, KE_END };
+enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };
+
+/*
+ * Certain keys are flagged as KE_IGNORE. All of these are either
+ * notifications (rather than requests for change) or are also sent
+ * via the keyboard controller, so they should not be sent again.
+ */
static struct key_entry dell_wmi_keymap[] = {
{KE_KEY, 0xe045, KEY_PROG1},
+ {KE_KEY, 0xe009, KEY_EJECTCD},
+
+ /* These also contain the brightness level at offset 6 */
+ {KE_KEY, 0xe006, KEY_BRIGHTNESSUP},
+ {KE_KEY, 0xe005, KEY_BRIGHTNESSDOWN},
+
+ /* Battery health status button */
+ {KE_KEY, 0xe007, KEY_BATTERY},
+
+ /* This is actually for all radios. Although physically a
+ * switch, the notification does not provide an indication of
+ * state and so it should be reported as a key */
+ {KE_KEY, 0xe008, KEY_WLAN},
+
+ /* The next device is at offset 6, the active devices are at
+ offset 8 and the attached devices at offset 10 */
+ {KE_KEY, 0xe00b, KEY_DISPLAYTOGGLE},
+
+ {KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE},
+
+ /* BIOS error detected */
+ {KE_IGNORE, 0xe00d, KEY_RESERVED},
+
+ /* Wifi Catcher */
+ {KE_KEY, 0xe011, KEY_PROG2},
+
+ /* Ambient light sensor toggle */
+ {KE_IGNORE, 0xe013, KEY_RESERVED},
+
+ {KE_IGNORE, 0xe020, KEY_MUTE},
+ {KE_IGNORE, 0xe02e, KEY_VOLUMEDOWN},
+ {KE_IGNORE, 0xe030, KEY_VOLUMEUP},
+ {KE_IGNORE, 0xe033, KEY_KBDILLUMUP},
+ {KE_IGNORE, 0xe034, KEY_KBDILLUMDOWN},
+ {KE_IGNORE, 0xe03a, KEY_CAPSLOCK},
+ {KE_IGNORE, 0xe045, KEY_NUMLOCK},
+ {KE_IGNORE, 0xe046, KEY_SCROLLLOCK},
{KE_END, 0}
};
@@ -122,15 +165,20 @@ static void dell_wmi_notify(u32 value, void *context)
if (obj && obj->type == ACPI_TYPE_BUFFER) {
int *buffer = (int *)obj->buffer.pointer;
- key = dell_wmi_get_entry_by_scancode(buffer[1]);
+ /*
+ * The upper bytes of the event may contain
+ * additional information, so mask them off for the
+ * scancode lookup
+ */
+ key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF);
if (key) {
input_report_key(dell_wmi_input_dev, key->keycode, 1);
input_sync(dell_wmi_input_dev);
input_report_key(dell_wmi_input_dev, key->keycode, 0);
input_sync(dell_wmi_input_dev);
- } else
+ } else if (buffer[1] & 0xFFFF)
printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- buffer[1]);
+ buffer[1] & 0xFFFF);
}
}
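The masking above matters because the WMI event word carries additional information in its upper bytes; only the low 16 bits are the scancode, and the "unknown key" message is only worth printing when that low word is non-zero. A small self-contained userspace sketch of the same lookup logic (the keymap and event value here are a made-up subset, not the real table):

/* Standalone illustration of the scancode masking used above. */
#include <stdio.h>
#include <stdint.h>

enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };

struct key_entry {
        int type;
        uint16_t code;
        const char *keycode;    /* stands in for the input keycode */
};

static const struct key_entry keymap[] = {
        {KE_KEY, 0xe045, "KEY_PROG1"},
        {KE_KEY, 0xe006, "KEY_BRIGHTNESSUP"},
        {KE_END, 0, NULL},
};

static const struct key_entry *get_entry_by_scancode(uint16_t code)
{
        const struct key_entry *k;

        for (k = keymap; k->type != KE_END; k++)
                if (code == k->code)
                        return k;
        return NULL;
}

int main(void)
{
        /* Hypothetical event word: extra data in the upper bytes,
         * scancode 0xe006 in the lower 16 bits. */
        uint32_t event = 0x000ae006;
        const struct key_entry *key = get_entry_by_scancode(event & 0xFFFF);

        if (key)
                printf("press/release %s\n", key->keycode);
        else if (event & 0xFFFF)
                printf("unknown key %x pressed\n", event & 0xFFFF);
        return 0;
}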
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 03bf522bd7ab..ec560f16d720 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -16,6 +16,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -31,6 +33,7 @@
#include <linux/input.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#define EEEPC_LAPTOP_VERSION "0.1"
@@ -40,11 +43,6 @@
#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
#define EEEPC_HOTK_HID "ASUS010"
-#define EEEPC_LOG EEEPC_HOTK_FILE ": "
-#define EEEPC_ERR KERN_ERR EEEPC_LOG
-#define EEEPC_WARNING KERN_WARNING EEEPC_LOG
-#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG
-#define EEEPC_INFO KERN_INFO EEEPC_LOG
/*
* Definitions for Asus EeePC
@@ -62,7 +60,10 @@ enum {
DISABLE_ASL_GPS = 0x0020,
DISABLE_ASL_DISPLAYSWITCH = 0x0040,
DISABLE_ASL_MODEM = 0x0080,
- DISABLE_ASL_CARDREADER = 0x0100
+ DISABLE_ASL_CARDREADER = 0x0100,
+ DISABLE_ASL_3G = 0x0200,
+ DISABLE_ASL_WIMAX = 0x0400,
+ DISABLE_ASL_HWCF = 0x0800
};
enum {
@@ -87,7 +88,13 @@ enum {
CM_ASL_USBPORT3,
CM_ASL_MODEM,
CM_ASL_CARDREADER,
- CM_ASL_LID
+ CM_ASL_3G,
+ CM_ASL_WIMAX,
+ CM_ASL_HWCF,
+ CM_ASL_LID,
+ CM_ASL_TYPE,
+ CM_ASL_PANELPOWER, /*P901*/
+ CM_ASL_TPD
};
static const char *cm_getv[] = {
@@ -96,7 +103,8 @@ static const char *cm_getv[] = {
NULL, "PBLG", NULL, NULL,
"CFVG", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODG",
- "CRDG", "LIDG"
+ "CRDG", "M3GG", "WIMG", "HWCF",
+ "LIDG", "TYPE", "PBPG", "TPDG"
};
static const char *cm_setv[] = {
@@ -105,7 +113,8 @@ static const char *cm_setv[] = {
"SDSP", "PBLS", "HDPS", NULL,
"CFVS", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODS",
- "CRDS", NULL
+ "CRDS", "M3GS", "WIMS", NULL,
+ NULL, NULL, "PBPS", "TPDS"
};
#define EEEPC_EC "\\_SB.PCI0.SBRG.EC0."
@@ -130,8 +139,10 @@ struct eeepc_hotk {
u16 event_count[128]; /* count for each event */
struct input_dev *inputdev;
u16 *keycode_map;
- struct rfkill *eeepc_wlan_rfkill;
- struct rfkill *eeepc_bluetooth_rfkill;
+ struct rfkill *wlan_rfkill;
+ struct rfkill *bluetooth_rfkill;
+ struct rfkill *wwan3g_rfkill;
+ struct hotplug_slot *hotplug_slot;
};
/* The actual device the driver binds to */
@@ -180,6 +191,8 @@ static struct key_entry eeepc_keymap[] = {
*/
static int eeepc_hotk_add(struct acpi_device *device);
static int eeepc_hotk_remove(struct acpi_device *device, int type);
+static int eeepc_hotk_resume(struct acpi_device *device);
+static void eeepc_hotk_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id eeepc_device_ids[] = {
{EEEPC_HOTK_HID, 0},
@@ -191,12 +204,24 @@ static struct acpi_driver eeepc_hotk_driver = {
.name = EEEPC_HOTK_NAME,
.class = EEEPC_HOTK_CLASS,
.ids = eeepc_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = eeepc_hotk_add,
.remove = eeepc_hotk_remove,
+ .resume = eeepc_hotk_resume,
+ .notify = eeepc_hotk_notify,
},
};
+/* PCI hotplug ops */
+static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
+ .owner = THIS_MODULE,
+ .get_adapter_status = eeepc_get_adapter_status,
+ .get_power_status = eeepc_get_adapter_status,
+};
+
/* The backlight device /sys/class/backlight */
static struct backlight_device *eeepc_backlight_device;
@@ -258,20 +283,20 @@ static int set_acpi(int cm, int value)
if (method == NULL)
return -ENODEV;
if (write_acpi_int(ehotk->handle, method, value, NULL))
- printk(EEEPC_WARNING "Error writing %s\n", method);
+ pr_warning("Error writing %s\n", method);
}
return 0;
}
static int get_acpi(int cm)
{
- int value = -1;
+ int value = -ENODEV;
if ((ehotk->cm_supported & (0x1 << cm))) {
const char *method = cm_getv[cm];
if (method == NULL)
return -ENODEV;
if (read_acpi_int(ehotk->handle, method, &value))
- printk(EEEPC_WARNING "Error reading %s\n", method);
+ pr_warning("Error reading %s\n", method);
}
return value;
}
@@ -316,6 +341,15 @@ static const struct rfkill_ops eeepc_rfkill_ops = {
.set_block = eeepc_rfkill_set,
};
+static void __init eeepc_enable_camera(void)
+{
+ /*
+ * If the following call to set_acpi() fails, it's because there's no
+ * camera, so we can ignore the error.
+ */
+ set_acpi(CM_ASL_CAMERA, 1);
+}
+
/*
* Sys helpers
*/
@@ -334,13 +368,19 @@ static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_acpi(cm, value);
+ value = set_acpi(cm, value);
+ if (value < 0)
+ return value;
return rv;
}
static ssize_t show_sys_acpi(int cm, char *buf)
{
- return sprintf(buf, "%d\n", get_acpi(cm));
+ int value = get_acpi(cm);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
}
#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
@@ -367,13 +407,88 @@ static ssize_t show_sys_acpi(int cm, char *buf)
EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
-EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV);
+
+struct eeepc_cpufv {
+ int num;
+ int cur;
+};
+
+static int get_cpufv(struct eeepc_cpufv *c)
+{
+ c->cur = get_acpi(CM_ASL_CPUFV);
+ c->num = (c->cur >> 8) & 0xff;
+ c->cur &= 0xff;
+ if (c->cur < 0 || c->num <= 0 || c->num > 12)
+ return -ENODEV;
+ return 0;
+}
+
+static ssize_t show_available_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_cpufv c;
+ int i;
+ ssize_t len = 0;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ for (i = 0; i < c.num; i++)
+ len += sprintf(buf + len, "%d ", i);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static ssize_t show_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_cpufv c;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
+}
+
+static ssize_t store_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeepc_cpufv c;
+ int rv, value;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ rv = parse_arg(buf, count, &value);
+ if (rv < 0)
+ return rv;
+ if (!rv || value < 0 || value >= c.num)
+ return -EINVAL;
+ set_acpi(CM_ASL_CPUFV, value);
+ return rv;
+}
+
+static struct device_attribute dev_attr_cpufv = {
+ .attr = {
+ .name = "cpufv",
+ .mode = 0644 },
+ .show = show_cpufv,
+ .store = store_cpufv
+};
+
+static struct device_attribute dev_attr_available_cpufv = {
+ .attr = {
+ .name = "available_cpufv",
+ .mode = 0444 },
+ .show = show_available_cpufv
+};
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
&dev_attr_cpufv.attr,
+ &dev_attr_available_cpufv.attr,
NULL
};
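The cpufv handling above relies on a packed value layout: get_acpi(CM_ASL_CPUFV) returns the number of supported levels in bits 15:8 and the current level in bits 7:0, which is why show_cpufv re-emits (num << 8) | cur and store_cpufv only accepts 0 <= value < num. A standalone sketch of that decoding, with a hypothetical raw value:

/* Illustration of the CPUFV value layout used by the sysfs files above. */
#include <stdio.h>

struct eeepc_cpufv {
        int num;        /* number of available levels, bits 15:8 */
        int cur;        /* currently selected level, bits 7:0   */
};

static int parse_cpufv(int raw, struct eeepc_cpufv *c)
{
        c->cur = raw & 0xff;
        c->num = (raw >> 8) & 0xff;
        if (c->cur < 0 || c->num <= 0 || c->num > 12)
                return -1;      /* same sanity bounds as get_cpufv() */
        return 0;
}

int main(void)
{
        struct eeepc_cpufv c;
        int raw = 0x0301;       /* hypothetical: 3 levels, level 1 selected */
        int i;

        if (parse_cpufv(raw, &c))
                return 1;

        /* what "cpufv" and "available_cpufv" would show */
        printf("cpufv: %#x\n", (c.num << 8) | c.cur);
        printf("available_cpufv:");
        for (i = 0; i < c.num; i++)
                printf(" %d", i);
        printf("\n");
        return 0;
}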
@@ -439,6 +554,28 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
return -EINVAL;
}
+static void cmsg_quirk(int cm, const char *name)
+{
+ int dummy;
+
+ /* Some BIOSes do not report cm although it is available.
+ Check if cm_getv[cm] works and, if yes, assume cm should be set. */
+ if (!(ehotk->cm_supported & (1 << cm))
+ && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) {
+ pr_info("%s (%x) not reported by BIOS,"
+ " enabling anyway\n", name, 1 << cm);
+ ehotk->cm_supported |= 1 << cm;
+ }
+}
+
+static void cmsg_quirks(void)
+{
+ cmsg_quirk(CM_ASL_LID, "LID");
+ cmsg_quirk(CM_ASL_TYPE, "TYPE");
+ cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER");
+ cmsg_quirk(CM_ASL_TPD, "TPD");
+}
+
static int eeepc_hotk_check(void)
{
const struct key_entry *key;
@@ -451,26 +588,24 @@ static int eeepc_hotk_check(void)
if (ehotk->device->status.present) {
if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
&buffer)) {
- printk(EEEPC_ERR "Hotkey initialization failed\n");
+ pr_err("Hotkey initialization failed\n");
return -ENODEV;
} else {
- printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
- ehotk->init_flag);
+ pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag);
}
/* get control methods supported */
if (read_acpi_int(ehotk->handle, "CMSG"
, &ehotk->cm_supported)) {
- printk(EEEPC_ERR
- "Get control methods supported failed\n");
+ pr_err("Get control methods supported failed\n");
return -ENODEV;
} else {
- printk(EEEPC_INFO
- "Get control methods supported: 0x%x\n",
- ehotk->cm_supported);
+ cmsg_quirks();
+ pr_info("Get control methods supported: 0x%x\n",
+ ehotk->cm_supported);
}
ehotk->inputdev = input_allocate_device();
if (!ehotk->inputdev) {
- printk(EEEPC_INFO "Unable to allocate input device\n");
+ pr_info("Unable to allocate input device\n");
return 0;
}
ehotk->inputdev->name = "Asus EeePC extra buttons";
@@ -489,12 +624,12 @@ static int eeepc_hotk_check(void)
}
result = input_register_device(ehotk->inputdev);
if (result) {
- printk(EEEPC_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
input_free_device(ehotk->inputdev);
return 0;
}
} else {
- printk(EEEPC_ERR "Hotkey device not present, aborting\n");
+ pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
}
return 0;
@@ -512,17 +647,27 @@ static int notify_brn(void)
return -1;
}
-static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ int val = get_acpi(CM_ASL_WLAN);
+
+ if (val == 1 || val == 0)
+ *value = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void eeepc_rfkill_hotplug(void)
{
struct pci_dev *dev;
struct pci_bus *bus = pci_find_bus(0, 1);
bool blocked;
- if (event != ACPI_NOTIFY_BUS_CHECK)
- return;
-
if (!bus) {
- printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
+ pr_warning("Unable to find PCI bus 1?\n");
return;
}
@@ -538,7 +683,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
if (dev) {
pci_bus_assign_resources(bus);
if (pci_bus_add_device(dev))
- printk(EEEPC_ERR "Unable to hotplug wifi\n");
+ pr_err("Unable to hotplug wifi\n");
}
} else {
dev = pci_get_slot(bus, 0);
@@ -548,10 +693,18 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
}
}
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
+ rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
}
-static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event != ACPI_NOTIFY_BUS_CHECK)
+ return;
+
+ eeepc_rfkill_hotplug();
+}
+
+static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
{
static struct key_entry *key;
u16 count;
@@ -559,6 +712,8 @@ static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
if (!ehotk)
return;
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
brn = notify_brn();
count = ehotk->event_count[event % 128]++;
@@ -611,8 +766,7 @@ static int eeepc_register_rfkill_notifier(char *node)
eeepc_rfkill_notify,
NULL);
if (ACPI_FAILURE(status))
- printk(EEEPC_WARNING
- "Failed to register notify on %s\n", node);
+ pr_warning("Failed to register notify on %s\n", node);
} else
return -ENODEV;
@@ -631,20 +785,66 @@ static void eeepc_unregister_rfkill_notifier(char *node)
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify);
if (ACPI_FAILURE(status))
- printk(EEEPC_ERR
- "Error removing rfkill notify handler %s\n",
+ pr_err("Error removing rfkill notify handler %s\n",
node);
}
}
+static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
+{
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
+}
+
+static int eeepc_setup_pci_hotplug(void)
+{
+ int ret = -ENOMEM;
+ struct pci_bus *bus = pci_find_bus(0, 1);
+
+ if (!bus) {
+ pr_err("Unable to find wifi PCI bus\n");
+ return -ENODEV;
+ }
+
+ ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!ehotk->hotplug_slot)
+ goto error_slot;
+
+ ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
+ GFP_KERNEL);
+ if (!ehotk->hotplug_slot->info)
+ goto error_info;
+
+ ehotk->hotplug_slot->private = ehotk;
+ ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
+ ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
+ eeepc_get_adapter_status(ehotk->hotplug_slot,
+ &ehotk->hotplug_slot->info->adapter_status);
+
+ ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi");
+ if (ret) {
+ pr_err("Unable to register hotplug slot - %d\n", ret);
+ goto error_register;
+ }
+
+ return 0;
+
+error_register:
+ kfree(ehotk->hotplug_slot->info);
+error_info:
+ kfree(ehotk->hotplug_slot);
+ ehotk->hotplug_slot = NULL;
+error_slot:
+ return ret;
+}
+
static int eeepc_hotk_add(struct acpi_device *device)
{
- acpi_status status = AE_OK;
int result;
if (!device)
return -EINVAL;
- printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
+ pr_notice(EEEPC_HOTK_NAME "\n");
ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
if (!ehotk)
return -ENOMEM;
@@ -657,58 +857,9 @@ static int eeepc_hotk_add(struct acpi_device *device)
result = eeepc_hotk_check();
if (result)
goto ehotk_fail;
- status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
- eeepc_hotk_notify, ehotk);
- if (ACPI_FAILURE(status))
- printk(EEEPC_ERR "Error installing notify handler\n");
-
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
-
- if (get_acpi(CM_ASL_WLAN) != -1) {
- ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan",
- &device->dev,
- RFKILL_TYPE_WLAN,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_WLAN);
-
- if (!ehotk->eeepc_wlan_rfkill)
- goto wlan_fail;
-
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
- get_acpi(CM_ASL_WLAN) != 1);
- result = rfkill_register(ehotk->eeepc_wlan_rfkill);
- if (result)
- goto wlan_fail;
- }
-
- if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
- ehotk->eeepc_bluetooth_rfkill =
- rfkill_alloc("eeepc-bluetooth",
- &device->dev,
- RFKILL_TYPE_BLUETOOTH,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_BLUETOOTH);
-
- if (!ehotk->eeepc_bluetooth_rfkill)
- goto bluetooth_fail;
-
- rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
- result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
- if (result)
- goto bluetooth_fail;
- }
return 0;
- bluetooth_fail:
- rfkill_destroy(ehotk->eeepc_bluetooth_rfkill);
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- wlan_fail:
- rfkill_destroy(ehotk->eeepc_wlan_rfkill);
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
ehotk_fail:
kfree(ehotk);
ehotk = NULL;
@@ -718,22 +869,39 @@ static int eeepc_hotk_add(struct acpi_device *device)
static int eeepc_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
- eeepc_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(EEEPC_ERR "Error removing notify handler\n");
-
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
kfree(ehotk);
return 0;
}
+static int eeepc_hotk_resume(struct acpi_device *device)
+{
+ if (ehotk->wlan_rfkill) {
+ bool wlan;
+
+ /* Workaround - it seems that _PTS disables the wireless
+ without notification or changing the value read by WLAN.
+ Normally this is fine because the correct value is restored
+ from the non-volatile storage on resume, but we need to do
+ it ourselves in case suspend is aborted, or we lose wireless.
+ */
+ wlan = get_acpi(CM_ASL_WLAN);
+ set_acpi(CM_ASL_WLAN, wlan);
+
+ rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1);
+
+ eeepc_rfkill_hotplug();
+ }
+
+ if (ehotk->bluetooth_rfkill)
+ rfkill_set_sw_state(ehotk->bluetooth_rfkill,
+ get_acpi(CM_ASL_BLUETOOTH) != 1);
+
+ return 0;
+}
+
/*
* Hwmon
*/
@@ -850,10 +1018,16 @@ static void eeepc_backlight_exit(void)
static void eeepc_rfkill_exit(void)
{
- if (ehotk->eeepc_wlan_rfkill)
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- if (ehotk->eeepc_bluetooth_rfkill)
- rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
+ if (ehotk->wlan_rfkill)
+ rfkill_unregister(ehotk->wlan_rfkill);
+ if (ehotk->bluetooth_rfkill)
+ rfkill_unregister(ehotk->bluetooth_rfkill);
+ if (ehotk->wwan3g_rfkill)
+ rfkill_unregister(ehotk->wwan3g_rfkill);
+ if (ehotk->hotplug_slot)
+ pci_hp_deregister(ehotk->hotplug_slot);
}
static void eeepc_input_exit(void)
@@ -888,6 +1062,75 @@ static void __exit eeepc_laptop_exit(void)
platform_driver_unregister(&platform_driver);
}
+static int eeepc_new_rfkill(struct rfkill **rfkill,
+ const char *name, struct device *dev,
+ enum rfkill_type type, int cm)
+{
+ int result;
+
+ result = get_acpi(cm);
+ if (result < 0)
+ return result;
+
+ *rfkill = rfkill_alloc(name, dev, type,
+ &eeepc_rfkill_ops, (void *)(unsigned long)cm);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+
+static int eeepc_rfkill_init(struct device *dev)
+{
+ int result = 0;
+
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+
+ result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
+ "eeepc-wlan", dev,
+ RFKILL_TYPE_WLAN, CM_ASL_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill,
+ "eeepc-bluetooth", dev,
+ RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill,
+ "eeepc-wwan3g", dev,
+ RFKILL_TYPE_WWAN, CM_ASL_3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_setup_pci_hotplug();
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_rfkill_exit();
+ return result;
+}
+
static int eeepc_backlight_init(struct device *dev)
{
struct backlight_device *bd;
@@ -895,8 +1138,7 @@ static int eeepc_backlight_init(struct device *dev)
bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
NULL, &eeepcbl_ops);
if (IS_ERR(bd)) {
- printk(EEEPC_ERR
- "Could not register eeepc backlight device\n");
+ pr_err("Could not register eeepc backlight device\n");
eeepc_backlight_device = NULL;
return PTR_ERR(bd);
}
@@ -915,8 +1157,7 @@ static int eeepc_hwmon_init(struct device *dev)
hwmon = hwmon_device_register(dev);
if (IS_ERR(hwmon)) {
- printk(EEEPC_ERR
- "Could not register eeepc hwmon device\n");
+ pr_err("Could not register eeepc hwmon device\n");
eeepc_hwmon_device = NULL;
return PTR_ERR(hwmon);
}
@@ -942,19 +1183,9 @@ static int __init eeepc_laptop_init(void)
acpi_bus_unregister_driver(&eeepc_hotk_driver);
return -ENODEV;
}
- dev = acpi_get_physical_device(ehotk->device->handle);
- if (!acpi_video_backlight_support()) {
- result = eeepc_backlight_init(dev);
- if (result)
- goto fail_backlight;
- } else
- printk(EEEPC_INFO "Backlight controlled by ACPI video "
- "driver\n");
+ eeepc_enable_camera();
- result = eeepc_hwmon_init(dev);
- if (result)
- goto fail_hwmon;
/* Register platform stuff */
result = platform_driver_register(&platform_driver);
if (result)
@@ -971,7 +1202,33 @@ static int __init eeepc_laptop_init(void)
&platform_attribute_group);
if (result)
goto fail_sysfs;
+
+ dev = &platform_device->dev;
+
+ if (!acpi_video_backlight_support()) {
+ result = eeepc_backlight_init(dev);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video "
+ "driver\n");
+
+ result = eeepc_hwmon_init(dev);
+ if (result)
+ goto fail_hwmon;
+
+ result = eeepc_rfkill_init(dev);
+ if (result)
+ goto fail_rfkill;
+
return 0;
+fail_rfkill:
+ eeepc_hwmon_exit();
+fail_hwmon:
+ eeepc_backlight_exit();
+fail_backlight:
+ sysfs_remove_group(&platform_device->dev.kobj,
+ &platform_attribute_group);
fail_sysfs:
platform_device_del(platform_device);
fail_platform_device2:
@@ -979,12 +1236,7 @@ fail_platform_device2:
fail_platform_device1:
platform_driver_unregister(&platform_driver);
fail_platform_driver:
- eeepc_hwmon_exit();
-fail_hwmon:
- eeepc_backlight_exit();
-fail_backlight:
eeepc_input_exit();
- eeepc_rfkill_exit();
return result;
}
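One convention worth spelling out for the rfkill and resume code above: get_acpi() reports 1 when a radio is enabled (anything else means off, and a negative value is an errno-style failure such as the hardware not being fitted), so the soft-block state handed to rfkill is consistently (value != 1), and a negative value simply means no rfkill device gets registered. A trivial standalone sketch of that convention, with hypothetical readings:

/* Illustration of the "enabled == 1" convention used with rfkill above. */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static bool sw_blocked(int acpi_value)
{
        return acpi_value != 1;
}

int main(void)
{
        int wlan = 1;           /* hypothetical CM_ASL_WLAN reading: radio on */
        int wwan = -ENODEV;     /* hypothetical: no 3G module present */

        printf("wlan soft-blocked: %d\n", sw_blocked(wlan));
        if (wwan < 0)
                printf("wwan3g: absent, no rfkill device registered\n");
        return 0;
}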
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 16fffe44e333..ca508564a181 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -47,7 +47,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_DISPLAY_QUERY 0x1
#define HPWMI_HDDTEMP_QUERY 0x2
#define HPWMI_ALS_QUERY 0x3
-#define HPWMI_DOCK_QUERY 0x4
+#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
@@ -75,10 +75,9 @@ struct key_entry {
u16 keycode;
};
-enum { KE_KEY, KE_SW, KE_END };
+enum { KE_KEY, KE_END };
static struct key_entry hp_wmi_keymap[] = {
- {KE_SW, 0x01, SW_DOCK},
{KE_KEY, 0x02, KEY_BRIGHTNESSUP},
{KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
{KE_KEY, 0x20e6, KEY_PROG1},
@@ -151,13 +150,28 @@ static int hp_wmi_als_state(void)
static int hp_wmi_dock_state(void)
{
- return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ return ret & 0x1;
+}
+
+static int hp_wmi_tablet_state(void)
+{
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret & 0x4) ? 1 : 0;
}
static int hp_wmi_set_block(void *data, bool blocked)
{
unsigned long b = (unsigned long) data;
- int query = BIT(b + 8) | ((!!blocked) << b);
+ int query = BIT(b + 8) | ((!blocked) << b);
return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query);
}
@@ -232,6 +246,15 @@ static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", value);
}
+static ssize_t show_tablet(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_tablet_state();
+ if (value < 0)
+ return -EINVAL;
+ return sprintf(buf, "%d\n", value);
+}
+
static ssize_t set_als(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -244,6 +267,7 @@ static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
+static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL);
static struct key_entry *hp_wmi_get_entry_by_scancode(int code)
{
@@ -326,13 +350,13 @@ static void hp_wmi_notify(u32 value, void *context)
key->keycode, 0);
input_sync(hp_wmi_input_dev);
break;
- case KE_SW:
- input_report_switch(hp_wmi_input_dev,
- key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
}
+ } else if (eventcode == 0x1) {
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
} else if (eventcode == 0x5) {
if (wifi_rfkill)
rfkill_set_sw_state(wifi_rfkill,
@@ -369,18 +393,19 @@ static int __init hp_wmi_input_setup(void)
set_bit(EV_KEY, hp_wmi_input_dev->evbit);
set_bit(key->keycode, hp_wmi_input_dev->keybit);
break;
- case KE_SW:
- set_bit(EV_SW, hp_wmi_input_dev->evbit);
- set_bit(key->keycode, hp_wmi_input_dev->swbit);
-
- /* Set initial dock state */
- input_report_switch(hp_wmi_input_dev, key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
}
}
+ set_bit(EV_SW, hp_wmi_input_dev->evbit);
+ set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ /* Set initial hardware state */
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
+
err = input_register_device(hp_wmi_input_dev);
if (err) {
@@ -397,6 +422,7 @@ static void cleanup_sysfs(struct platform_device *device)
device_remove_file(&device->dev, &dev_attr_hddtemp);
device_remove_file(&device->dev, &dev_attr_als);
device_remove_file(&device->dev, &dev_attr_dock);
+ device_remove_file(&device->dev, &dev_attr_tablet);
}
static int __init hp_wmi_bios_setup(struct platform_device *device)
@@ -416,6 +442,9 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
err = device_create_file(&device->dev, &dev_attr_dock);
if (err)
goto add_sysfs_error;
+ err = device_create_file(&device->dev, &dev_attr_tablet);
+ if (err)
+ goto add_sysfs_error;
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
@@ -485,23 +514,17 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
static int hp_wmi_resume_handler(struct platform_device *device)
{
- struct key_entry *key;
-
/*
- * Docking state may have changed while suspended, so trigger
- * an input event for the current state. As this is a switch,
+ * Hardware state may have changed while suspended, so trigger
+ * input events for the current state. As this is a switch,
* the input layer will only actually pass it on if the state
* changed.
*/
- for (key = hp_wmi_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_SW:
- input_report_switch(hp_wmi_input_dev, key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
- }
- }
+
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
return 0;
}
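The reworked hp-wmi code above folds dock and tablet-mode detection into a single HPWMI_HARDWARE_QUERY result: bit 0 is the dock state and bit 2 the tablet state, while the wireless write path builds its query word as BIT(radio + 8) | (enabled << radio). A standalone sketch of that bit handling (the query result and radio index below are hypothetical):

/* Illustration of the bit layouts used by the hp-wmi queries above. */
#include <stdio.h>

static int dock_state(int ret)   { return ret & 0x1; }
static int tablet_state(int ret) { return (ret & 0x4) ? 1 : 0; }

int main(void)
{
        int hw = 0x5;                   /* hypothetical hardware query result */
        unsigned long radio = 0;        /* hypothetical wifi radio index */
        int blocked = 0;

        printf("SW_DOCK=%d SW_TABLET_MODE=%d\n",
               dock_state(hw), tablet_state(hw));

        /* wireless query word: select the radio, then carry its new state */
        printf("wireless query = %#x\n",
               (int)((1UL << (radio + 8)) | ((unsigned long)(!blocked) << radio)));
        return 0;
}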
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 86e958539f46..a463fd72c495 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
*/
#define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020300
+#define TPACPI_SYSFS_VERSION 0x020400
/*
* Changelog:
@@ -257,6 +257,8 @@ static struct {
u32 wan:1;
u32 uwb:1;
u32 fan_ctrl_status_undef:1;
+ u32 second_fan:1;
+ u32 beep_needs_two_args:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
u32 platform_drv_attrs_registered:1;
@@ -277,8 +279,10 @@ struct thinkpad_id_data {
char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
- u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
+ u16 bios_model; /* 1Y = 0x5931, 0 = unknown */
u16 ec_model;
+ u16 bios_release; /* 1ZETK1WW = 0x314b, 0 = unknown */
+ u16 ec_release;
char *model_str; /* ThinkPad T43 */
char *nummodel_str; /* 9384A9C for a 9384-A9C model */
@@ -355,6 +359,73 @@ static void tpacpi_log_usertask(const char * const what)
} \
} while (0)
+/*
+ * Quirk handling helpers
+ *
+ * ThinkPad IDs and versions seen in the field so far
+ * are two-characters from the set [0-9A-Z], i.e. base 36.
+ *
+ * We use values well outside that range as specials.
+ */
+
+#define TPACPI_MATCH_ANY 0xffffU
+#define TPACPI_MATCH_UNKNOWN 0U
+
+/* TPID('1', 'Y') == 0x5931 */
+#define TPID(__c1, __c2) (((__c2) << 8) | (__c1))
+
+#define TPACPI_Q_IBM(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+#define TPACPI_Q_LNV(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+struct tpacpi_quirk {
+ unsigned int vendor;
+ u16 bios;
+ u16 ec;
+ unsigned long quirks;
+};
+
+/**
+ * tpacpi_check_quirks() - search BIOS/EC version on a list
+ * @qlist: array of &struct tpacpi_quirk
+ * @qlist_size: number of elements in @qlist
+ *
+ * Iterates over a quirks list until one is found that matches the
+ * ThinkPad's vendor, BIOS and EC model.
+ *
+ * Returns 0 if nothing matches, otherwise returns the quirks field of
+ * the matching &struct tpacpi_quirk entry.
+ *
+ * The match criteria are: vendor, ec and bios must match.
+ */
+static unsigned long __init tpacpi_check_quirks(
+ const struct tpacpi_quirk *qlist,
+ unsigned int qlist_size)
+{
+ while (qlist_size) {
+ if ((qlist->vendor == thinkpad_id.vendor ||
+ qlist->vendor == TPACPI_MATCH_ANY) &&
+ (qlist->bios == thinkpad_id.bios_model ||
+ qlist->bios == TPACPI_MATCH_ANY) &&
+ (qlist->ec == thinkpad_id.ec_model ||
+ qlist->ec == TPACPI_MATCH_ANY))
+ return qlist->quirks;
+
+ qlist_size--;
+ qlist++;
+ }
+ return 0;
+}
+
+
/****************************************************************************
****************************************************************************
*
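To make the quirk machinery above concrete: two-character ThinkPad model codes are packed little-endian into a u16 by TPID() and compared against a table in which TPACPI_MATCH_ANY (0xffff) acts as a wildcard; the first matching row wins. A standalone sketch of that matching follows — vendor checking is omitted for brevity and the table rows are hypothetical:

/* Illustration of the TPID packing and quirk-table lookup above. */
#include <stdio.h>
#include <stdint.h>

#define MATCH_ANY 0xffffU
#define TPID(c1, c2) (((c2) << 8) | (c1))

struct quirk {
        uint16_t bios;
        uint16_t ec;
        unsigned long quirks;
};

static unsigned long check_quirks(const struct quirk *q, unsigned int n,
                                  uint16_t bios, uint16_t ec)
{
        for (; n; n--, q++)
                if ((q->bios == bios || q->bios == MATCH_ANY) &&
                    (q->ec == ec || q->ec == MATCH_ANY))
                        return q->quirks;
        return 0;
}

int main(void)
{
        static const struct quirk table[] = {
                { TPID('1', 'Y'), MATCH_ANY, 0x0097 },  /* hypothetical T43 row */
                { MATCH_ANY,      MATCH_ANY, 0x00bf },  /* hypothetical fallback */
        };

        printf("TPID('1','Y') = %#x\n", TPID('1', 'Y'));        /* prints 0x5931 */
        printf("quirks = %#lx\n",
               check_quirks(table, 2, TPID('1', 'Y'), TPID('7', '0')));
        return 0;
}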
@@ -1163,8 +1234,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
{
struct tpacpi_rfk *atp_rfk;
int res;
- bool initial_sw_state = false;
- int initial_sw_status;
+ bool sw_state = false;
+ int sw_status;
BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1185,17 +1256,17 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
atp_rfk->id = id;
atp_rfk->ops = tp_rfkops;
- initial_sw_status = (tp_rfkops->get_status)();
- if (initial_sw_status < 0) {
+ sw_status = (tp_rfkops->get_status)();
+ if (sw_status < 0) {
printk(TPACPI_ERR
"failed to read initial state for %s, error %d\n",
- name, initial_sw_status);
+ name, sw_status);
} else {
- initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF);
+ sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
if (set_default) {
/* try to keep the initial state, since we ask the
* firmware to preserve it across S5 in NVRAM */
- rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state);
+ rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
}
}
rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
@@ -2880,7 +2951,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* update bright_acpimode... */
tpacpi_check_std_acpi_brightness_support();
- if (tp_features.bright_acpimode) {
+ if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
printk(TPACPI_INFO
"This ThinkPad has standard ACPI backlight "
"brightness control, supported by the ACPI "
@@ -4773,7 +4844,7 @@ TPACPI_HANDLE(led, ec, "SLED", /* 570 */
"LED", /* all others */
); /* R30, R31 */
-#define TPACPI_LED_NUMLEDS 8
+#define TPACPI_LED_NUMLEDS 16
static struct tpacpi_led_classdev *tpacpi_leds;
static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
@@ -4786,15 +4857,20 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
"tpacpi::dock_batt",
"tpacpi::unknown_led",
"tpacpi::standby",
+ "tpacpi::dock_status1",
+ "tpacpi::dock_status2",
+ "tpacpi::unknown_led2",
+ "tpacpi::unknown_led3",
+ "tpacpi::thinkvantage",
};
-#define TPACPI_SAFE_LEDS 0x0081U
+#define TPACPI_SAFE_LEDS 0x1081U
static inline bool tpacpi_is_led_restricted(const unsigned int led)
{
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
return false;
#else
- return (TPACPI_SAFE_LEDS & (1 << led)) == 0;
+ return (1U & (TPACPI_SAFE_LEDS >> led)) == 0;
#endif
}
@@ -4956,6 +5032,10 @@ static int __init tpacpi_init_led(unsigned int led)
tpacpi_leds[led].led = led;
+ /* LEDs with no name don't get registered */
+ if (!tpacpi_led_names[led])
+ return 0;
+
tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set;
tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set;
if (led_supported == TPACPI_LED_570)
@@ -4974,10 +5054,59 @@ static int __init tpacpi_init_led(unsigned int led)
return rc;
}
+static const struct tpacpi_quirk led_useful_qtable[] __initconst = {
+ TPACPI_Q_IBM('1', 'E', 0x009f), /* A30 */
+ TPACPI_Q_IBM('1', 'N', 0x009f), /* A31 */
+ TPACPI_Q_IBM('1', 'G', 0x009f), /* A31 */
+
+ TPACPI_Q_IBM('1', 'I', 0x0097), /* T30 */
+ TPACPI_Q_IBM('1', 'R', 0x0097), /* T40, T41, T42, R50, R51 */
+ TPACPI_Q_IBM('7', '0', 0x0097), /* T43, R52 */
+ TPACPI_Q_IBM('1', 'Y', 0x0097), /* T43 */
+ TPACPI_Q_IBM('1', 'W', 0x0097), /* R50e */
+ TPACPI_Q_IBM('1', 'V', 0x0097), /* R51 */
+ TPACPI_Q_IBM('7', '8', 0x0097), /* R51e */
+ TPACPI_Q_IBM('7', '6', 0x0097), /* R52 */
+
+ TPACPI_Q_IBM('1', 'K', 0x00bf), /* X30 */
+ TPACPI_Q_IBM('1', 'Q', 0x00bf), /* X31, X32 */
+ TPACPI_Q_IBM('1', 'U', 0x00bf), /* X40 */
+ TPACPI_Q_IBM('7', '4', 0x00bf), /* X41 */
+ TPACPI_Q_IBM('7', '5', 0x00bf), /* X41t */
+
+ TPACPI_Q_IBM('7', '9', 0x1f97), /* T60 (1) */
+ TPACPI_Q_IBM('7', '7', 0x1f97), /* Z60* (1) */
+ TPACPI_Q_IBM('7', 'F', 0x1f97), /* Z61* (1) */
+ TPACPI_Q_IBM('7', 'B', 0x1fb7), /* X60 (1) */
+
+ /* (1) - may have excess leds enabled on MSB */
+
+ /* Defaults (order matters, keep last, don't reorder!) */
+ { /* Lenovo */
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x1fffU,
+ },
+ { /* IBM ThinkPads with no EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_UNKNOWN,
+ .quirks = 0x00ffU,
+ },
+ { /* IBM ThinkPads with EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x00bfU,
+ },
+};
+
+#undef TPACPI_LEDQ_IBM
+#undef TPACPI_LEDQ_LNV
+
static int __init led_init(struct ibm_init_struct *iibm)
{
unsigned int i;
int rc;
+ unsigned long useful_leds;
vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
@@ -4999,6 +5128,9 @@ static int __init led_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
str_supported(led_supported), led_supported);
+ if (led_supported == TPACPI_LED_NONE)
+ return 1;
+
tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
GFP_KERNEL);
if (!tpacpi_leds) {
@@ -5006,8 +5138,12 @@ static int __init led_init(struct ibm_init_struct *iibm)
return -ENOMEM;
}
+ useful_leds = tpacpi_check_quirks(led_useful_qtable,
+ ARRAY_SIZE(led_useful_qtable));
+
for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
- if (!tpacpi_is_led_restricted(i)) {
+ if (!tpacpi_is_led_restricted(i) &&
+ test_bit(i, &useful_leds)) {
rc = tpacpi_init_led(i);
if (rc < 0) {
led_exit();
@@ -5017,12 +5153,11 @@ static int __init led_init(struct ibm_init_struct *iibm)
}
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
- if (led_supported != TPACPI_LED_NONE)
- printk(TPACPI_NOTICE
- "warning: userspace override of important "
- "firmware LEDs is enabled\n");
+ printk(TPACPI_NOTICE
+ "warning: userspace override of important "
+ "firmware LEDs is enabled\n");
#endif
- return (led_supported != TPACPI_LED_NONE)? 0 : 1;
+ return 0;
}
#define str_led_status(s) \
@@ -5052,7 +5187,7 @@ static int led_read(char *p)
}
len += sprintf(p + len, "commands:\t"
- "<led> on, <led> off, <led> blink (<led> is 0-7)\n");
+ "<led> on, <led> off, <led> blink (<led> is 0-15)\n");
return len;
}
@@ -5067,7 +5202,7 @@ static int led_write(char *buf)
return -ENODEV;
while ((cmd = next_cmd(&buf))) {
- if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7)
+ if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15)
return -EINVAL;
if (strstr(cmd, "off")) {
@@ -5101,8 +5236,17 @@ static struct ibm_struct led_driver_data = {
TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
+#define TPACPI_BEEP_Q1 0x0001
+
+static const struct tpacpi_quirk beep_quirk_table[] __initconst = {
+ TPACPI_Q_IBM('I', 'M', TPACPI_BEEP_Q1), /* 570 */
+ TPACPI_Q_IBM('I', 'U', TPACPI_BEEP_Q1), /* 570E - unverified */
+};
+
static int __init beep_init(struct ibm_init_struct *iibm)
{
+ unsigned long quirks;
+
vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
TPACPI_ACPIHANDLE_INIT(beep);
@@ -5110,6 +5254,11 @@ static int __init beep_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
str_supported(beep_handle != NULL));
+ quirks = tpacpi_check_quirks(beep_quirk_table,
+ ARRAY_SIZE(beep_quirk_table));
+
+ tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1);
+
return (beep_handle)? 0 : 1;
}
@@ -5141,8 +5290,15 @@ static int beep_write(char *buf)
/* beep_cmd set */
} else
return -EINVAL;
- if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0))
- return -EIO;
+ if (tp_features.beep_needs_two_args) {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vdd",
+ beep_cmd, 0))
+ return -EIO;
+ } else {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vd",
+ beep_cmd))
+ return -EIO;
+ }
}
return 0;
@@ -5569,6 +5725,10 @@ static struct ibm_struct ecdump_driver_data = {
* Bit 3-0: backlight brightness level
*
* brightness_get_raw returns status data in the HBRV layout
+ *
+ * WARNING: The X61 has been verified to use HBRV for something else, so
+ * this should be used _only_ on IBM ThinkPads, and maybe with some careful
+ * testing on the very early *60 Lenovo models...
*/
enum {
@@ -5869,6 +6029,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
brightness_mode);
}
+ /* Safety */
+ if (thinkpad_id.vendor != PCI_VENDOR_ID_IBM &&
+ (brightness_mode == TPACPI_BRGHT_MODE_ECNVRAM ||
+ brightness_mode == TPACPI_BRGHT_MODE_EC))
+ return -EINVAL;
+
if (tpacpi_brightness_get_raw(&b) < 0)
return 1;
@@ -6161,6 +6327,21 @@ static struct ibm_struct volume_driver_data = {
* For firmware bugs, refer to:
* http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
*
+ * ----
+ *
+ * ThinkPad EC register 0x31 bit 0 (only on select models)
+ *
+ * When bit 0 of EC register 0x31 is zero, the tachometer registers
+ * show the speed of the main fan. When bit 0 of EC register 0x31
+ * is one, the tachometer registers show the speed of the auxiliary
+ * fan.
+ *
+ * Fan control seems to affect both fans, regardless of the state
+ * of this bit.
+ *
+ * So far, only the firmware for the X60/X61 non-tablet versions
+ * seems to support this (firmware TP-7M).
+ *
* TPACPI_FAN_WR_ACPI_FANS:
* ThinkPad X31, X40, X41. Not available in the X60.
*
@@ -6187,6 +6368,8 @@ enum { /* Fan control constants */
fan_status_offset = 0x2f, /* EC register 0x2f */
fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
* 0x84 must be read before 0x85 */
+ fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M)
+ bit 0 selects which fan is active */
TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
@@ -6249,30 +6432,18 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
* We assume 0x07 really means auto mode while this quirk is active,
* as this is far more likely than the ThinkPad being in level 7,
* which is only used by the firmware during thermal emergencies.
+ *
+ * Enable for TP-1Y (T43), TP-78 (R51e), TP-76 (R52),
+ * TP-70 (T43, R52), which are known to be buggy.
*/
-static void fan_quirk1_detect(void)
+static void fan_quirk1_setup(void)
{
- /* In some ThinkPads, neither the EC nor the ACPI
- * DSDT initialize the HFSP register, and it ends up
- * being initially set to 0x07 when it *could* be
- * either 0x07 or 0x80.
- *
- * Enable for TP-1Y (T43), TP-78 (R51e),
- * TP-76 (R52), TP-70 (T43, R52), which are known
- * to be buggy. */
if (fan_control_initial_status == 0x07) {
- switch (thinkpad_id.ec_model) {
- case 0x5931: /* TP-1Y */
- case 0x3837: /* TP-78 */
- case 0x3637: /* TP-76 */
- case 0x3037: /* TP-70 */
- printk(TPACPI_NOTICE
- "fan_init: initial fan status is unknown, "
- "assuming it is in auto mode\n");
- tp_features.fan_ctrl_status_undef = 1;
- ;;
- }
+ printk(TPACPI_NOTICE
+ "fan_init: initial fan status is unknown, "
+ "assuming it is in auto mode\n");
+ tp_features.fan_ctrl_status_undef = 1;
}
}
@@ -6292,6 +6463,38 @@ static void fan_quirk1_handle(u8 *fan_status)
}
}
+/* Select main fan on X60/X61, NOOP on others */
+static bool fan_select_fan1(void)
+{
+ if (tp_features.second_fan) {
+ u8 val;
+
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
+ val &= 0xFEU;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+ }
+ return true;
+}
+
+/* Select secondary fan on X60/X61 */
+static bool fan_select_fan2(void)
+{
+ u8 val;
+
+ if (!tp_features.second_fan)
+ return false;
+
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
+ val |= 0x01U;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+
+ return true;
+}
+
/*
* Call with fan_mutex held
*/
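The second-fan support above hinges on EC register 0x31: clearing bit 0 routes the tachometer registers 0x84/0x85 to the main fan, setting it routes them to the auxiliary fan, and the RPM is combined as (hi << 8) | lo. A standalone model of that selection logic, using a fake EC register array and hypothetical readings:

/* Illustration of the fan-select and RPM-combine logic above. */
#include <stdio.h>
#include <stdint.h>

static uint8_t fake_ec[256];    /* stand-in for the embedded controller */

static void select_fan(int second)
{
        if (second)
                fake_ec[0x31] |= 0x01;
        else
                fake_ec[0x31] &= 0xFE;
}

int main(void)
{
        unsigned int speed;

        fake_ec[0x84] = 0x40;   /* hypothetical RPM LSB */
        fake_ec[0x85] = 0x0d;   /* hypothetical RPM MSB */

        select_fan(0);          /* fan1, as fan_get_speed() does */
        /* in the real EC, 0x84 (LSB) must be read before 0x85 (MSB) */
        speed = (fake_ec[0x85] << 8) | fake_ec[0x84];
        printf("fan1: %u RPM\n", speed);        /* 0x0d40 = 3392 */

        select_fan(1);          /* fan2 path */
        /* ...read 0x84/0x85 again, then reselect fan1 to play it safe */
        select_fan(0);
        return 0;
}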
@@ -6369,6 +6572,8 @@ static int fan_get_speed(unsigned int *speed)
switch (fan_status_access_mode) {
case TPACPI_FAN_RD_TPEC:
/* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan1()))
+ return -EIO;
if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
!acpi_ec_read(fan_rpm_offset + 1, &hi)))
return -EIO;
@@ -6385,6 +6590,34 @@ static int fan_get_speed(unsigned int *speed)
return 0;
}
+static int fan2_get_speed(unsigned int *speed)
+{
+ u8 hi, lo;
+ bool rc;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan2()))
+ return -EIO;
+ rc = !acpi_ec_read(fan_rpm_offset, &lo) ||
+ !acpi_ec_read(fan_rpm_offset + 1, &hi);
+ fan_select_fan1(); /* play it safe */
+ if (rc)
+ return -EIO;
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
static int fan_set_level(int level)
{
if (!fan_control_allowed)
@@ -6790,6 +7023,25 @@ static struct device_attribute dev_attr_fan_fan1_input =
__ATTR(fan1_input, S_IRUGO,
fan_fan1_input_show, NULL);
+/* sysfs fan fan2_input ------------------------------------------------ */
+static ssize_t fan_fan2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ unsigned int speed;
+
+ res = fan2_get_speed(&speed);
+ if (res < 0)
+ return res;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", speed);
+}
+
+static struct device_attribute dev_attr_fan_fan2_input =
+ __ATTR(fan2_input, S_IRUGO,
+ fan_fan2_input_show, NULL);
+
/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
char *buf)
@@ -6823,6 +7075,7 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
static struct attribute *fan_attributes[] = {
&dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
&dev_attr_fan_fan1_input.attr,
+ NULL, /* for fan2_input */
NULL
};
@@ -6830,9 +7083,36 @@ static const struct attribute_group fan_attr_group = {
.attrs = fan_attributes,
};
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
+#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+
+#define TPACPI_FAN_QI(__id1, __id2, __quirks) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = __quirks }
+
+#define TPACPI_FAN_QL(__id1, __id2, __quirks) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = __quirks }
+
+static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ TPACPI_FAN_QI('1', 'Y', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '8', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '6', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '0', TPACPI_FAN_Q1),
+ TPACPI_FAN_QL('7', 'M', TPACPI_FAN_2FAN),
+};
+
+#undef TPACPI_FAN_QL
+#undef TPACPI_FAN_QI
+
static int __init fan_init(struct ibm_init_struct *iibm)
{
int rc;
+ unsigned long quirks;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
"initializing fan subdriver\n");
@@ -6843,12 +7123,16 @@ static int __init fan_init(struct ibm_init_struct *iibm)
fan_control_commands = 0;
fan_watchdog_maxinterval = 0;
tp_features.fan_ctrl_status_undef = 0;
+ tp_features.second_fan = 0;
fan_control_desired_level = 7;
TPACPI_ACPIHANDLE_INIT(fans);
TPACPI_ACPIHANDLE_INIT(gfan);
TPACPI_ACPIHANDLE_INIT(sfan);
+ quirks = tpacpi_check_quirks(fan_quirk_table,
+ ARRAY_SIZE(fan_quirk_table));
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -6858,7 +7142,13 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (likely(acpi_ec_read(fan_status_offset,
&fan_control_initial_status))) {
fan_status_access_mode = TPACPI_FAN_RD_TPEC;
- fan_quirk1_detect();
+ if (quirks & TPACPI_FAN_Q1)
+ fan_quirk1_setup();
+ if (quirks & TPACPI_FAN_2FAN) {
+ tp_features.second_fan = 1;
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
+ "secondary fan support enabled\n");
+ }
} else {
printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
@@ -6914,6 +7204,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (fan_status_access_mode != TPACPI_FAN_NONE ||
fan_control_access_mode != TPACPI_FAN_WR_NONE) {
+ if (tp_features.second_fan) {
+ /* attach second fan tachometer */
+ fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
+ &dev_attr_fan_fan2_input.attr;
+ }
rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
&fan_attr_group);
if (rc < 0)
@@ -7385,6 +7680,24 @@ err_out:
/* Probing */
+static bool __pure __init tpacpi_is_fw_digit(const char c)
+{
+ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
+}
+
+/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lack (#.##c) */
+static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
+ const char t)
+{
+ return s && strlen(s) >= 8 &&
+ tpacpi_is_fw_digit(s[0]) &&
+ tpacpi_is_fw_digit(s[1]) &&
+ s[2] == t && s[3] == 'T' &&
+ tpacpi_is_fw_digit(s[4]) &&
+ tpacpi_is_fw_digit(s[5]) &&
+ s[6] == 'W' && s[7] == 'W';
+}
+
/* returns 0 - probe ok, or < 0 - probe error.
* Probe ok doesn't mean thinkpad found.
* On error, kfree() cleanup on tp->* is not performed, caller must do it */
@@ -7411,10 +7724,15 @@ static int __must_check __init get_thinkpad_model_data(
tp->bios_version_str = kstrdup(s, GFP_KERNEL);
if (s && !tp->bios_version_str)
return -ENOMEM;
- if (!tp->bios_version_str)
+
+ /* Really ancient ThinkPad 240X will fail this, which is fine */
+ if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E'))
return 0;
+
tp->bios_model = tp->bios_version_str[0]
| (tp->bios_version_str[1] << 8);
+ tp->bios_release = (tp->bios_version_str[4] << 8)
+ | tp->bios_version_str[5];
/*
* ThinkPad T23 or newer, A31 or newer, R50e or newer,
@@ -7433,8 +7751,21 @@ static int __must_check __init get_thinkpad_model_data(
tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
if (!tp->ec_version_str)
return -ENOMEM;
- tp->ec_model = ec_fw_string[0]
- | (ec_fw_string[1] << 8);
+
+ if (tpacpi_is_valid_fw_id(ec_fw_string, 'H')) {
+ tp->ec_model = ec_fw_string[0]
+ | (ec_fw_string[1] << 8);
+ tp->ec_release = (ec_fw_string[4] << 8)
+ | ec_fw_string[5];
+ } else {
+ printk(TPACPI_NOTICE
+ "ThinkPad firmware release %s "
+ "doesn't match the known patterns\n",
+ ec_fw_string);
+ printk(TPACPI_NOTICE
+ "please report this to %s\n",
+ TPACPI_MAIL);
+ }
break;
}
}
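The firmware-ID parsing added above accepts IDs of the form xxyTkkWW, where xx is the two-character model code, y is 'E' for BIOS strings and 'H' for EC strings, and kk is the release; the model code is then packed into bios_model/ec_model the same way the TPID() quirk macros expect. A standalone check of that pattern and packing (the BIOS string below is hypothetical):

/* Illustration of the xxyTkkWW firmware-ID validation and model packing. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_fw_digit(char c)
{
        return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
}

/* t is 'E' for BIOS IDs and 'H' for EC IDs, as in the driver */
static bool is_valid_fw_id(const char *s, char t)
{
        return s && strlen(s) >= 8 &&
               is_fw_digit(s[0]) && is_fw_digit(s[1]) &&
               s[2] == t && s[3] == 'T' &&
               is_fw_digit(s[4]) && is_fw_digit(s[5]) &&
               s[6] == 'W' && s[7] == 'W';
}

int main(void)
{
        const char *bios = "1YET51WW (1.03z)";  /* hypothetical T43 BIOS string */
        uint16_t model;

        if (!is_valid_fw_id(bios, 'E')) {
                printf("unrecognized firmware id\n");
                return 1;
        }
        model = bios[0] | (bios[1] << 8);
        printf("bios_model = %#x\n", model);    /* 0x5931, i.e. TPID('1','Y') */
        return 0;
}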
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 7f207f335bec..ef3a2cd3a7a0 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -287,6 +287,25 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
ACPI_DECODE_16);
}
+static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev,
+ struct acpi_resource *res)
+{
+ struct acpi_resource_extended_address64 *p = &res->data.ext_address64;
+
+ if (p->producer_consumer == ACPI_PRODUCER)
+ return;
+
+ if (p->resource_type == ACPI_MEMORY_RANGE)
+ pnpacpi_parse_allocated_memresource(dev,
+ p->minimum, p->address_length,
+ p->info.mem.write_protect);
+ else if (p->resource_type == ACPI_IO_RANGE)
+ pnpacpi_parse_allocated_ioresource(dev,
+ p->minimum, p->address_length,
+ p->granularity == 0xfff ? ACPI_DECODE_10 :
+ ACPI_DECODE_16);
+}
+
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
@@ -400,8 +419,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
- if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER)
- return AE_OK;
+ pnpacpi_parse_allocated_ext_address_space(dev, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -630,6 +648,28 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
IORESOURCE_IO_FIXED);
}
+static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
+ unsigned int option_flags,
+ struct acpi_resource *r)
+{
+ struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
+ unsigned char flags = 0;
+
+ if (p->address_length == 0)
+ return;
+
+ if (p->resource_type == ACPI_MEMORY_RANGE) {
+ if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
+ flags = IORESOURCE_MEM_WRITEABLE;
+ pnp_register_mem_resource(dev, option_flags, p->minimum,
+ p->minimum, 0, p->address_length,
+ flags);
+ } else if (p->resource_type == ACPI_IO_RANGE)
+ pnp_register_port_resource(dev, option_flags, p->minimum,
+ p->minimum, 0, p->address_length,
+ IORESOURCE_IO_FIXED);
+}
+
struct acpipnp_parse_option_s {
struct pnp_dev *dev;
unsigned int option_flags;
@@ -711,6 +751,7 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+ pnpacpi_parse_ext_address_option(dev, option_flags, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -765,6 +806,7 @@ static int pnpacpi_supported_resource(struct acpi_resource *res)
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
+ case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
return 1;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 33da1127992a..7eda34838bfe 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -82,6 +82,14 @@ config BATTERY_DA9030
Say Y here to enable support for batteries charger integrated into
DA9030 PMIC.
+config BATTERY_MAX17040
+ tristate "Maxim MAX17040 Fuel Gauge"
+ depends on I2C
+ help
+ The MAX17040 is a fuel gauge for lithium-ion (Li+) batteries
+ in handheld and portable equipment. It is configured to
+ operate with a single lithium cell.
+
config CHARGER_PCF50633
tristate "NXP PCF50633 MBC"
depends on MFD_PCF50633
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 2fcf41d13e5c..daf3179689aa 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
-obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
\ No newline at end of file
+obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 1662bb0f23a5..3364198134a1 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -22,8 +22,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#define DA9030_STATUS_CHDET (1 << 3)
-
#define DA9030_FAULT_LOG 0x0a
#define DA9030_FAULT_LOG_OVER_TEMP (1 << 7)
#define DA9030_FAULT_LOG_VBAT_OVER (1 << 4)
@@ -244,6 +242,8 @@ static void da9030_set_charge(struct da9030_charger *charger, int on)
}
da903x_write(charger->master, DA9030_CHARGE_CONTROL, val);
+
+ power_supply_changed(&charger->psy);
}
static void da9030_charger_check_state(struct da9030_charger *charger)
@@ -258,6 +258,12 @@ static void da9030_charger_check_state(struct da9030_charger *charger)
da9030_set_charge(charger, 1);
}
} else {
+ /* Charger has been pulled out */
+ if (!charger->chdet) {
+ da9030_set_charge(charger, 0);
+ return;
+ }
+
if (charger->adc.vbat_res >=
charger->thresholds.vbat_charge_stop) {
da9030_set_charge(charger, 0);
@@ -395,13 +401,11 @@ static int da9030_battery_event(struct notifier_block *nb, unsigned long event,
{
struct da9030_charger *charger =
container_of(nb, struct da9030_charger, nb);
- int status;
switch (event) {
case DA9030_EVENT_CHDET:
- status = da903x_query_status(charger->master,
- DA9030_STATUS_CHDET);
- da9030_set_charge(charger, status);
+ cancel_delayed_work_sync(&charger->work);
+ schedule_work(&charger->work.work);
break;
case DA9030_EVENT_VBATMON:
da9030_battery_vbat_event(charger);
@@ -565,7 +569,8 @@ static int da9030_battery_remove(struct platform_device *dev)
da903x_unregister_notifier(charger->master, &charger->nb,
DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON |
DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT);
- cancel_delayed_work(&charger->work);
+ cancel_delayed_work_sync(&charger->work);
+ da9030_set_charge(charger, 0);
power_supply_unregister(&charger->psy);
kfree(charger);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index a52d4a11652d..520b5c49ff30 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -62,6 +62,10 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+static unsigned int pmod_enabled;
+module_param(pmod_enabled, bool, 0644);
+MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
+
/* Some batteries have their rated capacity stored a N * 10 mAh, while
* others use an index into this table. */
static int rated_capacities[] = {
@@ -259,6 +263,17 @@ static void ds2760_battery_update_status(struct ds2760_device_info *di)
power_supply_changed(&di->bat);
}
+static void ds2760_battery_write_status(struct ds2760_device_info *di,
+ char status)
+{
+ if (status == di->raw[DS2760_STATUS_REG])
+ return;
+
+ w1_ds2760_write(di->w1_dev, &status, DS2760_STATUS_WRITE_REG, 1);
+ w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+}
+
static void ds2760_battery_work(struct work_struct *work)
{
struct ds2760_device_info *di = container_of(work,
@@ -342,9 +357,9 @@ static enum power_supply_property ds2760_battery_props[] = {
static int ds2760_battery_probe(struct platform_device *pdev)
{
+ char status;
int retval = 0;
struct ds2760_device_info *di;
- struct ds2760_platform_data *pdata;
di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di) {
@@ -354,14 +369,13 @@ static int ds2760_battery_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, di);
- pdata = pdev->dev.platform_data;
- di->dev = &pdev->dev;
- di->w1_dev = pdev->dev.parent;
- di->bat.name = dev_name(&pdev->dev);
- di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
- di->bat.properties = ds2760_battery_props;
- di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
- di->bat.get_property = ds2760_battery_get_property;
+ di->dev = &pdev->dev;
+ di->w1_dev = pdev->dev.parent;
+ di->bat.name = dev_name(&pdev->dev);
+ di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->bat.properties = ds2760_battery_props;
+ di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
+ di->bat.get_property = ds2760_battery_get_property;
di->bat.external_power_changed =
ds2760_battery_external_power_changed;
@@ -373,6 +387,16 @@ static int ds2760_battery_probe(struct platform_device *pdev)
goto batt_failed;
}
+ /* enable sleep mode feature */
+ ds2760_battery_read_status(di);
+ status = di->raw[DS2760_STATUS_REG];
+ if (pmod_enabled)
+ status |= DS2760_STATUS_PMOD;
+ else
+ status &= ~DS2760_STATUS_PMOD;
+
+ ds2760_battery_write_status(di, status);
+
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
if (!di->monitor_wqueue) {
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
new file mode 100644
index 000000000000..87b98bf27ae1
--- /dev/null
+++ b/drivers/power/max17040_battery.c
@@ -0,0 +1,309 @@
+/*
+ * max17040_battery.c
+ * fuel-gauge systems for lithium-ion (Li+) batteries
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/power_supply.h>
+#include <linux/max17040_battery.h>
+
+#define MAX17040_VCELL_MSB 0x02
+#define MAX17040_VCELL_LSB 0x03
+#define MAX17040_SOC_MSB 0x04
+#define MAX17040_SOC_LSB 0x05
+#define MAX17040_MODE_MSB 0x06
+#define MAX17040_MODE_LSB 0x07
+#define MAX17040_VER_MSB 0x08
+#define MAX17040_VER_LSB 0x09
+#define MAX17040_RCOMP_MSB 0x0C
+#define MAX17040_RCOMP_LSB 0x0D
+#define MAX17040_CMD_MSB 0xFE
+#define MAX17040_CMD_LSB 0xFF
+
+#define MAX17040_DELAY 1000
+#define MAX17040_BATTERY_FULL 95
+
+struct max17040_chip {
+ struct i2c_client *client;
+ struct delayed_work work;
+ struct power_supply battery;
+ struct max17040_platform_data *pdata;
+
+	/* battery online state */
+ int online;
+ /* battery voltage */
+ int vcell;
+ /* battery capacity */
+ int soc;
+	/* charging status */
+ int status;
+};
+
+static int max17040_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max17040_chip *chip = container_of(psy,
+ struct max17040_chip, battery);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = chip->status;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->online;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = chip->vcell;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = chip->soc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int max17040_read_reg(struct i2c_client *client, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static void max17040_reset(struct i2c_client *client)
+{
+ max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
+ max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+}
+
+static void max17040_get_vcell(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
+ lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+
+ chip->vcell = (msb << 4) + (lsb >> 4);
+}
+
+static void max17040_get_soc(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_SOC_MSB);
+ lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+
+ chip->soc = msb;
+}
+
+static void max17040_get_version(struct i2c_client *client)
+{
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_VER_MSB);
+ lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+
+ dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+}
+
+static void max17040_get_online(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ if (chip->pdata->battery_online)
+ chip->online = chip->pdata->battery_online();
+ else
+ chip->online = 1;
+}
+
+static void max17040_get_status(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ return;
+ }
+
+ if (chip->pdata->charger_online()) {
+ if (chip->pdata->charger_enable())
+ chip->status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ chip->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else {
+ chip->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ if (chip->soc > MAX17040_BATTERY_FULL)
+ chip->status = POWER_SUPPLY_STATUS_FULL;
+}
+
+static void max17040_work(struct work_struct *work)
+{
+ struct max17040_chip *chip;
+
+ chip = container_of(work, struct max17040_chip, work.work);
+
+ max17040_get_vcell(chip->client);
+ max17040_get_soc(chip->client);
+ max17040_get_online(chip->client);
+ max17040_get_status(chip->client);
+
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+}
+
+static enum power_supply_property max17040_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int __devinit max17040_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct max17040_chip *chip;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->pdata = client->dev.platform_data;
+
+ i2c_set_clientdata(client, chip);
+
+ chip->battery.name = "battery";
+ chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->battery.get_property = max17040_get_property;
+ chip->battery.properties = max17040_battery_props;
+ chip->battery.num_properties = ARRAY_SIZE(max17040_battery_props);
+
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed: power supply register\n");
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return ret;
+ }
+
+ max17040_reset(client);
+ max17040_get_version(client);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work);
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+
+ return 0;
+}
+
+static int __devexit max17040_remove(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ power_supply_unregister(&chip->battery);
+ cancel_delayed_work(&chip->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int max17040_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ cancel_delayed_work(&chip->work);
+ return 0;
+}
+
+static int max17040_resume(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+ return 0;
+}
+
+#else
+
+#define max17040_suspend NULL
+#define max17040_resume NULL
+
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id max17040_id[] = {
+ { "max17040", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max17040_id);
+
+static struct i2c_driver max17040_i2c_driver = {
+ .driver = {
+ .name = "max17040",
+ },
+ .probe = max17040_probe,
+ .remove = __devexit_p(max17040_remove),
+ .suspend = max17040_suspend,
+ .resume = max17040_resume,
+ .id_table = max17040_id,
+};
+
+static int __init max17040_init(void)
+{
+ return i2c_add_driver(&max17040_i2c_driver);
+}
+module_init(max17040_init);
+
+static void __exit max17040_exit(void)
+{
+ i2c_del_driver(&max17040_i2c_driver);
+}
+module_exit(max17040_exit);
+
+MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
+MODULE_DESCRIPTION("MAX17040 Fuel Gauge");
+MODULE_LICENSE("GPL");
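The new driver takes its charger callbacks from platform data (<linux/max17040_battery.h>) and matches on the "max17040" I2C id. A hedged sketch of how board code might wire it up; the callback prototypes are inferred from how the driver calls them, and the 0x36 slave address and bus number 0 are assumptions for the sketch, not taken from this patch:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/max17040_battery.h>

/* hypothetical board-side callbacks */
static int board_charger_online(void)
{
	return 1;
}

static int board_charger_enable(void)
{
	return 1;
}

static struct max17040_platform_data board_max17040_pdata = {
	.charger_online	= board_charger_online,
	.charger_enable	= board_charger_enable,
	/* .battery_online left NULL: the driver then reports online = 1 */
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("max17040", 0x36),	/* assumed address */
		.platform_data = &board_max17040_pdata,
	},
};

static int __init board_register_fuel_gauge(void)
{
	/* bus number 0 is an assumption for the sketch */
	return i2c_register_board_info(0, board_i2c_devs,
				       ARRAY_SIZE(board_i2c_devs));
}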
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index aafd3e6ebb0d..a118eb0f1e67 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,8 +1,8 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
+ * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
*
- * Copyright 2004-2008 Analog Devices Inc.
+ * Copyright 2004-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
struct bfin_rtc *rtc;
struct device *dev = &pdev->dev;
int ret = 0;
- unsigned long timeout;
+ unsigned long timeout = jiffies + HZ;
dev_dbg_stamp(dev);
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
device_init_wakeup(dev, 1);
+ /* Register our RTC with the RTC framework */
+ rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops,
+ THIS_MODULE);
+ if (unlikely(IS_ERR(rtc->rtc_dev))) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err;
+ }
+
/* Grab the IRQ and init the hardware */
ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
- goto err;
+ goto err_reg;
/* sometimes the bootloader touched things, but the write complete was not
* enabled, so let's just do a quick timeout here since the IRQ will not fire ...
*/
- timeout = jiffies + HZ;
while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
if (time_after(jiffies, timeout))
break;
bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
- /* Register our RTC with the RTC framework */
- rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
- if (unlikely(IS_ERR(rtc->rtc_dev))) {
- ret = PTR_ERR(rtc->rtc_dev);
- goto err_irq;
- }
-
return 0;
- err_irq:
- free_irq(IRQ_RTC, dev);
- err:
+err_reg:
+ rtc_device_unregister(rtc->rtc_dev);
+err:
kfree(rtc);
return ret;
}
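The reordering above registers the RTC class device before the shared IRQ is requested, so the interrupt handler can never run against a half-initialized device, and the error path now unwinds in the opposite order. A generic sketch of that probe/unwind shape; the demo_* names and the IRQ number are hypothetical, not the Blackfin code:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>

#define DEMO_RTC_IRQ	42			/* hypothetical IRQ number */

struct demo_rtc {
	struct rtc_device *rtc_dev;
};

static const struct rtc_class_ops demo_rtc_ops;	/* hypothetical empty ops */

static irqreturn_t demo_rtc_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __devinit demo_rtc_probe(struct platform_device *pdev)
{
	struct demo_rtc *rtc;
	int ret;

	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	/* register with the RTC framework first ... */
	rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev,
					   &demo_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc->rtc_dev)) {
		ret = PTR_ERR(rtc->rtc_dev);
		goto err_free;
	}

	/* ... so the handler may safely touch rtc->rtc_dev once the IRQ fires */
	ret = request_irq(DEMO_RTC_IRQ, demo_rtc_interrupt, IRQF_SHARED,
			  pdev->name, &pdev->dev);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	rtc_device_unregister(rtc->rtc_dev);
err_free:
	kfree(rtc);
	return ret;
}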
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 32b27739ec2a..713f7bf5afb3 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -283,7 +283,7 @@ static void ds1374_work(struct work_struct *work)
stat = i2c_smbus_read_byte_data(client, DS1374_REG_SR);
if (stat < 0)
- return;
+ goto unlock;
if (stat & DS1374_REG_SR_AF) {
stat &= ~DS1374_REG_SR_AF;
@@ -302,7 +302,7 @@ static void ds1374_work(struct work_struct *work)
out:
if (!ds1374->exiting)
enable_irq(client->irq);
-
+unlock:
mutex_unlock(&ds1374->mutex);
}
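The fix above replaces an early return with a jump to a label that drops the mutex, so an I2C read failure no longer leaves the lock held. The same pattern reduced to a standalone sketch; demo_read_status() is a hypothetical stand-in for the failing i2c_smbus_read_byte_data() call:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static int demo_read_status(void)
{
	return 0;	/* stand-in for the real register read */
}

static void demo_work_fn(void)
{
	int stat;

	mutex_lock(&demo_mutex);

	stat = demo_read_status();
	if (stat < 0)
		goto unlock;	/* never return with the mutex held */

	/* ... act on the status ... */

unlock:
	mutex_unlock(&demo_mutex);
}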
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index f11297aff854..2c839d0d21bd 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Real Time Clock unit.
*
- * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,7 +33,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e5b84db0aa03..749836668655 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -470,7 +470,7 @@ static int dasd_decrease_state(struct dasd_device *device)
*/
static void dasd_change_state(struct dasd_device *device)
{
- int rc;
+ int rc;
if (device->state == device->target)
/* Already where we want to go today... */
@@ -479,8 +479,10 @@ static void dasd_change_state(struct dasd_device *device)
rc = dasd_increase_state(device);
else
rc = dasd_decrease_state(device);
- if (rc && rc != -EAGAIN)
- device->target = device->state;
+ if (rc == -EAGAIN)
+ return;
+ if (rc)
+ device->target = device->state;
if (device->state == device->target) {
wake_up(&dasd_init_waitq);
@@ -2503,15 +2505,25 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
if (IS_ERR(device))
return PTR_ERR(device);
+ /* allow new IO again */
+ device->stopped &= ~DASD_STOPPED_PM;
+ device->stopped &= ~DASD_UNRESUMED_PM;
+
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
if (device->discipline->restore)
rc = device->discipline->restore(device);
+ if (rc)
+ /*
+ * if the resume failed for the DASD we put it in
+ * an UNRESUMED stop state
+ */
+ device->stopped |= DASD_UNRESUMED_PM;
dasd_put_device(device);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1c28ec3e4ccb..f8b1f04f26b8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3243,9 +3243,6 @@ int dasd_eckd_restore_device(struct dasd_device *device)
int is_known, rc;
struct dasd_uid temp_uid;
- /* allow new IO again */
- device->stopped &= ~DASD_STOPPED_PM;
-
private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */
@@ -3295,12 +3292,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
return 0;
out_err:
- /*
- * if the resume failed for the DASD we put it in
- * an UNRESUMED stop state
- */
- device->stopped |= DASD_UNRESUMED_PM;
- return 0;
+ return -1;
}
static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 04dc734805c6..21639d6c996f 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -20,10 +20,7 @@
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/reboot.h>
-
#include <linux/slab.h>
-#include <linux/bootmem.h>
-
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/io.h>
@@ -735,7 +732,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev)
unsigned long flags;
/* Empty the output buffer, then prevent new I/O. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
raw->flags |= RAW3215_FROZEN;
@@ -749,7 +746,7 @@ static int raw3215_pm_start(struct ccw_device *cdev)
unsigned long flags;
/* Allow I/O again and flush output buffer. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_FROZEN;
raw->flags |= RAW3215_FLUSHING;
@@ -883,7 +880,7 @@ static int __init con3215_init(void)
raw3215_freelist = NULL;
spin_lock_init(&raw3215_freelist_lock);
for (i = 0; i < NR_3215_REQ; i++) {
- req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req));
+ req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
req->next = raw3215_freelist;
raw3215_freelist = req;
}
@@ -893,10 +890,9 @@ static int __init con3215_init(void)
return -ENODEV;
raw3215[0] = raw = (struct raw3215_info *)
- alloc_bootmem_low(sizeof(struct raw3215_info));
- memset(raw, 0, sizeof(struct raw3215_info));
- raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
- raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
+ kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
+ raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+ raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
raw->cdev = cdev;
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
@@ -906,9 +902,9 @@ static int __init con3215_init(void)
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
- free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
- free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
- free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
+ kfree(raw->inbuf);
+ kfree(raw->buffer);
+ kfree(raw);
raw3215[0] = NULL;
return -ENODEV;
}
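The console init above no longer uses the bootmem allocator; it takes zeroed memory from the slab allocator with GFP_DMA instead, keeping the buffers in the low, 31-bit addressable region the channel programs need on s390. A minimal sketch of that allocation call; the buffer size and names are placeholders:

#include <linux/gfp.h>
#include <linux/slab.h>

#define DEMO_BUFFER_SIZE	65536	/* placeholder size */

static void *demo_alloc_channel_buffer(void)
{
	/*
	 * GFP_DMA keeps the buffer in 31-bit addressable memory for the
	 * channel subsystem; kzalloc() zero-fills, matching the behaviour
	 * of the alloc_bootmem_low() calls it replaces.
	 */
	return kzalloc(DEMO_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
}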
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 44d02e371c04..bb838bdf829d 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -600,16 +599,14 @@ con3270_init(void)
if (IS_ERR(rp))
return PTR_ERR(rp);
- condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270));
- memset(condev, 0, sizeof(struct con3270));
+ condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
condev->view.dev = rp;
- condev->read = raw3270_request_alloc_bootmem(0);
+ condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev;
- condev->write =
- raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE);
- condev->kreset = raw3270_request_alloc_bootmem(1);
+ condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
+ condev->kreset = raw3270_request_alloc(1);
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
@@ -623,7 +620,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
- cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
+ cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
}
condev->cline = alloc_string(&condev->freemem, condev->view.cols);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 75a8831eebbc..7892550d7932 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp)
goto out_path;
}
filp->private_data = monpriv;
- monreader_device->driver_data = monpriv;
+ dev_set_drvdata(monreader_device, monpriv);
unlock_kernel();
return nonseekable_open(inode, filp);
@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = {
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
@@ -487,7 +487,7 @@ static int monreader_freeze(struct device *dev)
static int monreader_thaw(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
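Several hunks in this series (monreader, raw3270, con3215, tape, vmlogrdr, vmur, netiucv) replace direct dev->driver_data accesses with the driver-core accessors. A generic sketch of the accessor pair, not tied to any of these drivers:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_priv {
	int frozen;
};

static int demo_probe(struct device *dev)
{
	struct demo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	/* instead of: dev->driver_data = priv; */
	dev_set_drvdata(dev, priv);
	return 0;
}

static int demo_freeze(struct device *dev)
{
	/* instead of: struct demo_priv *priv = dev->driver_data; */
	struct demo_priv *priv = dev_get_drvdata(dev);

	if (!priv)
		return 0;
	priv->frozen = 1;
	return 0;
}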
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index acab7b2dfe8a..d6a022f55e92 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -143,33 +142,6 @@ raw3270_request_alloc(size_t size)
return rq;
}
-#ifdef CONFIG_TN3270_CONSOLE
-/*
- * Allocate a new 3270 ccw request from bootmem. Only works very
- * early in the boot process. Only con3270.c should be using this.
- */
-struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
-{
- struct raw3270_request *rq;
-
- rq = alloc_bootmem_low(sizeof(struct raw3270));
-
- /* alloc output buffer. */
- if (size > 0)
- rq->buffer = alloc_bootmem_low(size);
- rq->size = size;
- INIT_LIST_HEAD(&rq->list);
-
- /*
- * Setup ccw.
- */
- rq->ccw.cda = __pa(rq->buffer);
- rq->ccw.flags = CCW_FLAG_SLI;
-
- return rq;
-}
-#endif
-
/*
* Free 3270 ccw request
*/
@@ -846,8 +818,8 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
char *ascebc;
int rc;
- rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270));
- ascebc = (char *) alloc_bootmem(256);
+ rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
@@ -1350,7 +1322,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev)
struct raw3270_view *view;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
@@ -1376,7 +1348,7 @@ static int raw3270_pm_start(struct ccw_device *cdev)
struct raw3270 *rp;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 336811a77672..ad698d30cb3b 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
@@ -110,7 +109,7 @@ static void sclp_console_sync_queue(void)
spin_lock_irqsave(&sclp_con_lock, flags);
if (timer_pending(&sclp_con_timer))
- del_timer_sync(&sclp_con_timer);
+ del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
@@ -298,8 +297,8 @@ sclp_console_init(void)
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- list_add_tail((struct list_head *) page, &sclp_con_pages);
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ list_add_tail(page, &sclp_con_pages);
}
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 5518e24946aa..178724f2a4c3 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -20,7 +20,6 @@
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
-#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
@@ -601,10 +600,7 @@ static void __init __sclp_vt220_free_pages(void)
list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page);
- if (slab_is_available())
- free_page((unsigned long) page);
- else
- free_bootmem((unsigned long) page, PAGE_SIZE);
+ free_page((unsigned long) page);
}
}
@@ -640,16 +636,12 @@ static int __init __sclp_vt220_init(int num_pages)
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
+ rc = -ENOMEM;
for (i = 0; i < num_pages; i++) {
- if (slab_is_available())
- page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- else
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- if (!page) {
- rc = -ENOMEM;
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
goto out;
- }
- list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+ list_add_tail(page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 595aa04cfd01..1d420d947596 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -396,7 +396,7 @@ int tape_generic_pm_suspend(struct ccw_device *cdev)
{
struct tape_device *device;
- device = cdev->dev.driver_data;
+ device = dev_get_drvdata(&cdev->dev);
if (!device) {
return -ENODEV;
}
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 411cfa3c7719..c20a4fe6da51 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -663,7 +663,7 @@ static struct attribute *vmlogrdr_attrs[] = {
static int vmlogrdr_pm_prepare(struct device *dev)
{
int rc;
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0;
if (priv) {
@@ -753,7 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
- dev->driver_data = priv;
+ dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
* module has been unloaded. It's _only_ task is to
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7d9e67cb6471..31b902e94f7b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -170,7 +170,7 @@ static void urdev_put(struct urdev *urd)
*/
static int ur_pm_suspend(struct ccw_device *cdev)
{
- struct urdev *urd = cdev->dev.driver_data;
+ struct urdev *urd = dev_get_drvdata(&cdev->dev);
TRACE("ur_pm_suspend: cdev=%p\n", cdev);
if (urd->open_flag) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb8114388..b1241f8fae88 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void)
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */
-void qdio_sync_after_thinint(struct qdio_q *q);
-int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
- int auto_ack);
-void qdio_check_outbound_after_thinint(struct qdio_q *q);
-int qdio_inbound_q_moved(struct qdio_q *q);
-void qdio_kick_handler(struct qdio_q *q);
-void qdio_stop_polling(struct qdio_q *q);
-int qdio_siga_sync_q(struct qdio_q *q);
-
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state);
#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b34f86c..b8626d4df116 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
- qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
- get_buf_state(q, i, &state, 0);
+ debug_get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..0038750ad945 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
return i;
}
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
- unsigned char *state, int auto_ack)
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, int auto_ack)
{
return get_buf_states(q, bufnr, state, 1, auto_ack);
}
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
QDIO_MAX_BUFFERS_PER_Q);
}
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
return cc;
}
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
return cc;
}
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
if (pci_out_supported(q)) {
if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
qdio_siga_sync_q(q);
}
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ qdio_siga_sync_q(q);
+ return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
{
if (!q->u.in.polling)
return;
@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count)
count--;
if (!count)
return;
-
- /*
- * Need to change all PRIMED buffers to NOT_INIT, otherwise
- * we're loosing initiative in the thinint code.
- */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
- count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /*
- * No siga sync here, as a PCI or we after a thin interrupt
- * will sync the queues.
- */
-
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
goto out;
+ /*
+ * No siga sync here: either a PCI interrupt or the thin-interrupt
+ * handler has already synced the queues.
+ */
count = get_buf_states(q, q->first_to_check, &state, count, 1);
if (!count)
goto out;
@@ -490,14 +483,9 @@ check_next:
switch (state) {
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
- /*
- * No siga-sync needed for non-qebsm here, as the inbound queue
- * will be synced on the next siga-r, resp.
- * tiqdio_is_inbound_q_done will do the siga-sync.
- */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
- goto check_next;
+ break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -516,7 +504,7 @@ out:
return q->first_to_check;
}
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
{
int bufnr;
@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
- if (!need_siga_sync(q) && !pci_out_supported(q))
+ if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
q->u.in.timestamp = get_usecs();
-
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
return 1;
} else
return 0;
}
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
- /*
- * We need that one for synchronization with the adapter, as it
- * does a kind of PCI avoidance.
- */
qdio_siga_sync_q(q);
-
get_buf_state(q, q->first_to_check, &state, 0);
+
if (state == SLSB_P_INPUT_PRIMED)
- /* we got something to do */
+ /* more work coming */
return 0;
- /* on VM, we don't poll, so the q is always done here */
- if (need_siga_sync(q) || pci_out_supported(q))
+ if (is_thinint_irq(q->irq_ptr))
+ return 1;
+
+ /* don't poll under z/VM */
+ if (MACHINE_IS_VM)
return 1;
/*
@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
q->first_to_check);
return 1;
- } else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
- q->first_to_check);
+ } else
return 0;
- }
}
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
int end = q->first_to_check;
@@ -619,7 +601,6 @@ again:
goto again;
}
-/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
return q->first_to_check;
@@ -661,13 +637,7 @@ check_next:
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
- /*
- * We fetch all buffer states at once. get_buf_states may
- * return count < stop. For QEBSM we do not loop.
- */
- if (is_qebsm(q))
- break;
- goto check_next;
+ break;
case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data)
tasklet_schedule(&q->tasklet);
}
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
struct qdio_q *out;
int i;
@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
tasklet_schedule(&out->tasklet);
}
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qdio_sync_after_thinint(q);
+
+ /*
+ * The interrupt could be caused by a PCI request. Check the
+ * PCI capable outbound queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_handler(q);
+
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __tiqdio_inbound_processing(q);
+}
+
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
@@ -1488,18 +1497,13 @@ out:
* @count: how many buffers to process
*/
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count)
+ int q_nr, unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr;
- if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
- (count > QDIO_MAX_BUFFERS_PER_Q) ||
- (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- if (!count)
- return 0;
-
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d011a78d..981a77ea7ee2 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -43,9 +43,6 @@ struct indicator_t {
};
static struct indicator_t *q_indicators;
-static void tiqdio_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
-
static int css_qdio_omit_svs;
static inline unsigned long do_clear_global_summary(void)
@@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
xchg(irq_ptr->dsci, 1);
}
-/*
- * we cannot stop the tiqdio tasklet here since it is for all
- * thinint qdio devices and it must run as long as there is a
- * thinint device left
- */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
@@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
}
}
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
-{
- unsigned char state = 0;
-
- if (!atomic_read(&q->nr_buf_used))
- return 1;
-
- qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
-
- if (state == SLSB_P_INPUT_PRIMED)
- /* more work coming */
- return 0;
- return 1;
-}
-
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
-static void __tiqdio_inbound_processing(struct qdio_q *q)
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @ind: pointer to adapter local summary indicator
+ * @drv_data: NULL
+ */
+static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
- qdio_sync_after_thinint(q);
+ struct qdio_q *q;
+
+ qdio_perf_stat_inc(&perf_stats.thin_int);
/*
- * Maybe we have work on our outbound queues... at least
- * we have to check the PCI capable queues.
+ * SVS only when needed: issue SVS to benefit from iqdio interrupt
+ * avoidance (SVS clears adapter interrupt suppression overwrite)
*/
- qdio_check_outbound_after_thinint(q);
-
- if (!qdio_inbound_q_moved(q))
- return;
-
- qdio_kick_handler(q);
-
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
+ if (!css_qdio_omit_svs)
+ do_clear_global_summary();
- qdio_stop_polling(q);
/*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
+ * reset local summary indicator (tiqdio_alsi) to stop adapter
+ * interrupts for now
*/
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
-
- __tiqdio_inbound_processing(q);
-}
-
-/* check for work on all inbound thinint queues */
-static void tiqdio_tasklet_fn(unsigned long data)
-{
- struct qdio_q *q;
-
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
-again:
+ xchg((u8 *)ind, 0);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
+ /* check for work on all inbound thinint queues */
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
@@ -226,37 +178,6 @@ again:
if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
}
-
- /* check for more work */
- if (*tiqdio_alsi) {
- xchg(tiqdio_alsi, 0);
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
- goto again;
- }
-}
-
-/**
- * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
- */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
-{
- qdio_perf_stat_inc(&perf_stats.thin_int);
-
- /*
- * SVS only when needed: issue SVS to benefit from iqdio interrupt
- * avoidance (SVS clears adapter interrupt suppression overwrite)
- */
- if (!css_qdio_omit_svs)
- do_clear_global_summary();
-
- /*
- * reset local summary indicator (tiqdio_alsi) to stop adapter
- * interrupts for now, the tasklet will clean all dsci's
- */
- xchg((u8 *)ind, 0);
- tasklet_hi_schedule(&tiqdio_tasklet);
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void)
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
- tasklet_kill(&tiqdio_tasklet);
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9c148406b980..727a809636d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -54,6 +54,12 @@ static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
+static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
+static int ap_device_remove(struct device *dev);
+static int ap_device_probe(struct device *dev);
+static void ap_interrupt_handler(void *unused1, void *unused2);
+static void ap_reset(struct ap_device *ap_dev);
+static void ap_config_timeout(unsigned long ptr);
/*
* Module description.
@@ -101,6 +107,10 @@ static struct hrtimer ap_poll_timer;
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
static unsigned long long poll_timeout = 250000;
+/* Suspend flag */
+static int ap_suspend_flag;
+static struct bus_type ap_bus_type;
+
/**
* ap_using_interrupts() - Returns non-zero if interrupt support is
* available.
@@ -617,10 +627,79 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
return retval;
}
+static int ap_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ unsigned long flags;
+
+ if (!ap_suspend_flag) {
+ ap_suspend_flag = 1;
+
+ /* Disable scanning for devices, so that removed devices are
+ * not scanned for again while the bus is suspended.
+ */
+ del_timer_sync(&ap_config_timer);
+ if (ap_work_queue != NULL) {
+ destroy_workqueue(ap_work_queue);
+ ap_work_queue = NULL;
+ }
+ tasklet_disable(&ap_tasklet);
+ }
+ /* Poll on the device until all requests are finished. */
+ do {
+ flags = 0;
+ __ap_poll_device(ap_dev, &flags);
+ } while ((flags & 1) || (flags & 2));
+
+ ap_device_remove(dev);
+ return 0;
+}
+
+static int ap_bus_resume(struct device *dev)
+{
+ int rc = 0;
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ if (ap_suspend_flag) {
+ ap_suspend_flag = 0;
+ if (!ap_interrupts_available())
+ ap_interrupt_indicator = NULL;
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ ap_scan_bus(NULL);
+ init_timer(&ap_config_timer);
+ ap_config_timer.function = ap_config_timeout;
+ ap_config_timer.data = 0;
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+ ap_work_queue = create_singlethread_workqueue("kapwork");
+ if (!ap_work_queue)
+ return -ENOMEM;
+ tasklet_enable(&ap_tasklet);
+ if (!ap_using_interrupts())
+ ap_schedule_poll_timer();
+ else
+ tasklet_schedule(&ap_tasklet);
+ if (ap_thread_flag)
+ rc = ap_poll_thread_start();
+ } else {
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ }
+
+ return rc;
+}
+
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
+ .suspend = ap_bus_suspend,
+ .resume = ap_bus_resume
};
static int ap_device_probe(struct device *dev)
@@ -1066,7 +1145,7 @@ ap_config_timeout(unsigned long ptr)
*/
static inline void ap_schedule_poll_timer(void)
{
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return;
if (hrtimer_is_queued(&ap_poll_timer))
return;
@@ -1384,6 +1463,8 @@ static int ap_poll_thread(void *data)
set_user_nice(current, 19);
while (1) {
+ if (ap_suspend_flag)
+ return 0;
if (need_resched()) {
schedule();
continue;
@@ -1414,7 +1495,7 @@ static int ap_poll_thread_start(void)
{
int rc;
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return 0;
mutex_lock(&ap_poll_thread_mutex);
if (!ap_poll_kthread) {
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 52574ce797b2..8c36eafcfbfe 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1307,7 +1307,7 @@ static void netiucv_pm_complete(struct device *dev)
*/
static int netiucv_pm_freeze(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
@@ -1331,7 +1331,7 @@ out:
*/
static int netiucv_pm_restore_thaw(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6a19ed9a1194..9c23122f755f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -258,10 +258,21 @@ config SCSI_SCAN_ASYNC
or async on the kernel's command line.
config SCSI_WAIT_SCAN
- tristate
+ tristate # No prompt here, this is an invisible symbol.
default m
depends on SCSI
depends on MODULES
+# scsi_wait_scan is a loadable module which waits until all the async scans are
+# complete. The idea is to use it in initrd/initramfs scripts. You modprobe
+# it after all the modprobes of the root SCSI drivers and it will wait until
+# they have all finished scanning their buses before allowing the boot to
+# proceed. (This method is not applicable if targets boot independently in
+# parallel with the initiator, or with transports with non-deterministic target
+# discovery schemes, or if a transport driver does not support scsi_wait_scan.)
+#
+# This symbol is not exposed as a prompt because little is to be gained by
+# disabling it, whereas people who accidentally switch it off may wonder why
+# their mkinitrd gets into trouble.
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 0471f8800483..4240b05aef6d 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -2826,8 +2826,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
*/
local_irq_restore(flags);
- printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n"
- KERN_INFO " before abortion\n", HOSTNO);
+ printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
/* Maybe it is sufficient just to release the ST-DMA lock... (if
* possible at all) At least, we should check if the lock could be
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index b62b482e55e7..1e9f7141102b 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -1,6 +1,8 @@
config SCSI_BNX2_ISCSI
tristate "Broadcom NetXtreme II iSCSI support"
select SCSI_ISCSI_ATTRS
+ select NETDEVICES
+ select NETDEV_1000
select CNIC
depends on PCI
---help---
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild
index 25a2032bfa26..70d060b7ff4f 100644
--- a/drivers/scsi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgb3i/Kbuild
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3
+EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3
cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 99c912547902..344fd53b9954 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -206,6 +206,31 @@ int cxgb3i_ddp_find_page_index(unsigned long pgsz)
return DDP_PGIDX_MAX;
}
+/**
+ * cxgb3i_ddp_adjust_page_table - adjust the ddp page table to the system PAGE_SIZE
+ * returns 0 on success, or -EINVAL if PAGE_SIZE is smaller than the minimum
+ * ddp page size.
+ */
+int cxgb3i_ddp_adjust_page_table(void)
+{
+ int i;
+ unsigned int base_order, order;
+
+ if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
+ ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n",
+ PAGE_SIZE, 1UL << ddp_page_shift[0]);
+ return -EINVAL;
+ }
+
+ base_order = get_order(1UL << ddp_page_shift[0]);
+ order = get_order(1 << PAGE_SHIFT);
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ /* first is the kernel page size, then just doubling the size */
+ ddp_page_order[i] = order - base_order + i;
+ ddp_page_shift[i] = PAGE_SHIFT + i;
+ }
+ return 0;
+}
+
static inline void ddp_gl_unmap(struct pci_dev *pdev,
struct cxgb3i_gather_list *gl)
{
@@ -598,30 +623,40 @@ int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
* release all the resource held by the ddp pagepod manager for a given
* adapter if needed
*/
-void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
+
+static void ddp_cleanup(struct kref *kref)
{
+ struct cxgb3i_ddp_info *ddp = container_of(kref,
+ struct cxgb3i_ddp_info,
+ refcnt);
int i = 0;
+
+ ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);
+
+ ddp->tdev->ulp_iscsi = NULL;
+ while (i < ddp->nppods) {
+ struct cxgb3i_gather_list *gl = ddp->gl_map[i];
+ if (gl) {
+ int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
+ >> PPOD_PAGES_SHIFT;
+ ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
+ ddp->tdev, i, npods);
+ kfree(gl);
+ ddp_free_gl_skb(ddp, i, npods);
+ i += npods;
+ } else
+ i++;
+ }
+ cxgb3i_free_big_mem(ddp);
+}
+
+void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
+{
struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
-
- if (ddp) {
- tdev->ulp_iscsi = NULL;
- while (i < ddp->nppods) {
- struct cxgb3i_gather_list *gl = ddp->gl_map[i];
- if (gl) {
- int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
- >> PPOD_PAGES_SHIFT;
- ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
- tdev, i, npods);
- kfree(gl);
- ddp_free_gl_skb(ddp, i, npods);
- i += npods;
- } else
- i++;
- }
- cxgb3i_free_big_mem(ddp);
- }
+ if (ddp)
+ kref_put(&ddp->refcnt, ddp_cleanup);
}
/**
@@ -631,12 +666,13 @@ void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
*/
static void ddp_init(struct t3cdev *tdev)
{
- struct cxgb3i_ddp_info *ddp;
+ struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
struct ulp_iscsi_info uinfo;
unsigned int ppmax, bits;
int i, err;
- if (tdev->ulp_iscsi) {
+ if (ddp) {
+ kref_get(&ddp->refcnt);
ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
tdev, tdev->ulp_iscsi);
return;
@@ -670,6 +706,7 @@ static void ddp_init(struct t3cdev *tdev)
ppmax *
sizeof(struct cxgb3i_gather_list *));
spin_lock_init(&ddp->map_lock);
+ kref_init(&ddp->refcnt);
ddp->tdev = tdev;
ddp->pdev = uinfo.pdev;
@@ -715,6 +752,17 @@ void cxgb3i_ddp_init(struct t3cdev *tdev)
{
if (page_idx == DDP_PGIDX_MAX) {
page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
+
+ if (page_idx == DDP_PGIDX_MAX) {
+ ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
+ PAGE_SIZE);
+ if (cxgb3i_ddp_adjust_page_table() < 0) {
+ ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
+ PAGE_SIZE);
+ return;
+ }
+ page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
+ }
ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
PAGE_SIZE, page_idx);
}
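The cleanup path above moves from an unconditional teardown to a reference count, so that a second user of the same t3cdev (ddp_init() now does kref_get() when ulp_iscsi is already set) keeps the structure alive until the last cxgb3i_ddp_cleanup() call drops it. A stripped-down sketch of that kref lifecycle with hypothetical demo_* names:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref refcnt;
	/* ... payload ... */
};

static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, refcnt);

	kfree(obj);	/* runs only when the last reference is dropped */
}

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->refcnt);	/* refcount starts at 1 */
	return obj;
}

static void demo_attach(struct demo_obj *obj)
{
	kref_get(&obj->refcnt);			/* a second user attaches */
}

static void demo_detach(struct demo_obj *obj)
{
	kref_put(&obj->refcnt, demo_release);	/* free on the last put */
}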
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 0d296de7cf32..87dd56b422bf 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -54,6 +54,7 @@ struct cxgb3i_gather_list {
* struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
*
* @list: list head to link elements
+ * @refcnt: ref. count
* @tdev: pointer to t3cdev used by cxgb3 driver
* @max_txsz: max tx packet size for ddp
* @max_rxsz: max rx packet size for ddp
@@ -70,6 +71,7 @@ struct cxgb3i_gather_list {
*/
struct cxgb3i_ddp_info {
struct list_head list;
+ struct kref refcnt;
struct t3cdev *tdev;
struct pci_dev *pdev;
unsigned int max_txsz;
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 74369a3f963b..c399f485aa7d 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
#include <linux/inet.h>
#include <linux/crypto.h>
+#include <linux/if_vlan.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
@@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
struct cxgb3i_adapter *snic;
int i;
+ if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(ndev);
+
read_lock(&cxgb3i_snic_rwlock);
list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
for (i = 0; i < snic->hba_cnt; i++) {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c15878e88157..0a5609bb5817 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -45,8 +45,6 @@
#include "fcoe.h"
-static int debug_fcoe;
-
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");
@@ -305,23 +303,22 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
#ifdef NETIF_F_FCOE_CRC
if (netdev->features & NETIF_F_FCOE_CRC) {
lp->crc_offload = 1;
- printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
- netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
}
#endif
#ifdef NETIF_F_FSO
if (netdev->features & NETIF_F_FSO) {
lp->seq_offload = 1;
lp->lso_max = netdev->gso_max_size;
- printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
- netdev->name, lp->lso_max);
+ FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
+ lp->lso_max);
}
#endif
if (netdev->fcoe_ddp_xid) {
lp->lro_enabled = 1;
lp->lro_xid = netdev->fcoe_ddp_xid;
- printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
- netdev->name, lp->lro_xid);
+ FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
+ lp->lro_xid);
}
skb_queue_head_init(&fc->fcoe_pending_queue);
fc->fcoe_pending_queue_active = 0;
@@ -407,7 +404,8 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
/* add the new host to the SCSI-ml */
rc = scsi_add_host(lp->host, dev);
if (rc) {
- FC_DBG("fcoe_shost_config:error on scsi_add_host\n");
+ FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
+ "error on scsi_add_host\n");
return rc;
}
sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
@@ -448,8 +446,7 @@ static int fcoe_if_destroy(struct net_device *netdev)
BUG_ON(!netdev);
- printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n",
- netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
lp = fcoe_hostlist_lookup(netdev);
if (!lp)
@@ -560,8 +557,7 @@ static int fcoe_if_create(struct net_device *netdev)
BUG_ON(!netdev);
- printk(KERN_DEBUG "fcoe_if_create:interface on %s\n",
- netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Create Interface\n");
lp = fcoe_hostlist_lookup(netdev);
if (lp)
@@ -570,7 +566,7 @@ static int fcoe_if_create(struct net_device *netdev)
shost = libfc_host_alloc(&fcoe_shost_template,
sizeof(struct fcoe_softc));
if (!shost) {
- FC_DBG("Could not allocate host structure\n");
+ FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
return -ENOMEM;
}
lp = shost_priv(shost);
@@ -579,7 +575,8 @@ static int fcoe_if_create(struct net_device *netdev)
/* configure fc_lport, e.g., em */
rc = fcoe_lport_config(lp);
if (rc) {
- FC_DBG("Could not configure lport\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
+ "interface\n");
goto out_host_put;
}
@@ -593,28 +590,32 @@ static int fcoe_if_create(struct net_device *netdev)
/* configure lport network properties */
rc = fcoe_netdev_config(lp, netdev);
if (rc) {
- FC_DBG("Could not configure netdev for the interface\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
+ "interface\n");
goto out_netdev_cleanup;
}
/* configure lport scsi host properties */
rc = fcoe_shost_config(lp, shost, &netdev->dev);
if (rc) {
- FC_DBG("Could not configure shost for lport\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
+ "interface\n");
goto out_netdev_cleanup;
}
/* lport exch manager allocation */
rc = fcoe_em_config(lp);
if (rc) {
- FC_DBG("Could not configure em for lport\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
+ "interface\n");
goto out_netdev_cleanup;
}
/* Initialize the library */
rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
if (rc) {
- FC_DBG("Could not configure libfc for lport!\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
+ "interface\n");
goto out_lp_destroy;
}
@@ -653,7 +654,7 @@ static int __init fcoe_if_init(void)
fc_attach_transport(&fcoe_transport_function);
if (!scsi_transport_fcoe_sw) {
- printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n");
+ printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
return -ENODEV;
}
@@ -714,7 +715,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
unsigned targ_cpu = smp_processor_id();
#endif /* CONFIG_SMP */
- printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);
+ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
/* Prevent any new skbs from being queued for this CPU. */
p = &per_cpu(fcoe_percpu, cpu);
@@ -736,8 +737,8 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
p0 = &per_cpu(fcoe_percpu, targ_cpu);
spin_lock_bh(&p0->fcoe_rx_list.lock);
if (p0->thread) {
- FC_DBG("Moving frames from CPU %d to CPU %d\n",
- cpu, targ_cpu);
+ FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
+ cpu, targ_cpu);
while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
__skb_queue_tail(&p0->fcoe_rx_list, skb);
@@ -803,12 +804,12 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- FC_DBG("CPU %x online: Create Rx thread\n", cpu);
+ FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
fcoe_percpu_thread_create(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
+ FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
fcoe_percpu_thread_destroy(cpu);
break;
default:
@@ -846,24 +847,21 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
lp = fc->ctlr.lp;
if (unlikely(lp == NULL)) {
- FC_DBG("cannot find hba structure");
+ FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
goto err2;
}
if (!lp->link_up)
goto err2;
- if (unlikely(debug_fcoe)) {
- FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
- "end:%p sum:%d dev:%s", skb->len, skb->data_len,
- skb->head, skb->data, skb_tail_pointer(skb),
- skb_end_pointer(skb), skb->csum,
- skb->dev ? skb->dev->name : "<NULL>");
-
- }
+ FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
+ "data:%p tail:%p end:%p sum:%d dev:%s",
+ skb->len, skb->data_len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb),
+ skb->csum, skb->dev ? skb->dev->name : "<NULL>");
/* check for FCOE packet type */
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
- FC_DBG("wrong FC type frame");
+ FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
goto err;
}
@@ -901,8 +899,9 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
* the first CPU now. For non-SMP systems this
* will check the same CPU twice.
*/
- FC_DBG("CPU is online, but no receive thread ready "
- "for incoming skb- using first online CPU.\n");
+ FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
+ "ready for incoming skb- using first online "
+ "CPU.\n");
spin_unlock_bh(&fps->fcoe_rx_list.lock);
cpu = first_cpu(cpu_online_map);
@@ -1201,19 +1200,17 @@ int fcoe_percpu_receive_thread(void *arg)
fr = fcoe_dev_from_skb(skb);
lp = fr->fr_dev;
if (unlikely(lp == NULL)) {
- FC_DBG("invalid HBA Structure");
+ FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
kfree_skb(skb);
continue;
}
- if (unlikely(debug_fcoe)) {
- FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
- "tail:%p end:%p sum:%d dev:%s",
- skb->len, skb->data_len,
- skb->head, skb->data, skb_tail_pointer(skb),
- skb_end_pointer(skb), skb->csum,
- skb->dev ? skb->dev->name : "<NULL>");
- }
+ FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
+ "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
+ skb->len, skb->data_len,
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->csum,
+ skb->dev ? skb->dev->name : "<NULL>");
/*
* Save source MAC address before discarding header.
@@ -1233,7 +1230,7 @@ int fcoe_percpu_receive_thread(void *arg)
stats = fc_lport_get_stats(lp);
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
- printk(KERN_WARNING "FCoE version "
+ printk(KERN_WARNING "fcoe: FCoE version "
"mismatch: The frame has "
"version %x, but the "
"initiator supports version "
@@ -1286,7 +1283,7 @@ int fcoe_percpu_receive_thread(void *arg)
if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
if (le32_to_cpu(fr_crc(fp)) !=
~crc32(~0, skb->data, fr_len)) {
- if (debug_fcoe || stats->InvalidCRCCount < 5)
+ if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping "
"frame with CRC error\n");
stats->InvalidCRCCount++;
@@ -1432,7 +1429,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case NETDEV_REGISTER:
break;
default:
- FC_DBG("Unknown event %ld from netdev netlink\n", event);
+ FCOE_NETDEV_DBG(real_dev, "Unknown event %ld "
+ "from netdev netlink\n", event);
}
if (link_possible && !fcoe_link_ok(lp))
fcoe_ctlr_link_up(&fc->ctlr);
@@ -1505,8 +1503,8 @@ static int fcoe_ethdrv_get(const struct net_device *netdev)
owner = fcoe_netdev_to_module_owner(netdev);
if (owner) {
- printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
- module_name(owner), netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
+ module_name(owner));
return try_module_get(owner);
}
return -ENODEV;
@@ -1527,8 +1525,8 @@ static int fcoe_ethdrv_put(const struct net_device *netdev)
owner = fcoe_netdev_to_module_owner(netdev);
if (owner) {
- printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
- module_name(owner), netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
+ module_name(owner));
module_put(owner);
return 0;
}
@@ -1559,7 +1557,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
}
rc = fcoe_if_destroy(netdev);
if (rc) {
- printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n",
+ printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n",
netdev->name);
rc = -EIO;
goto out_putdev;
@@ -1598,7 +1596,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
rc = fcoe_if_create(netdev);
if (rc) {
- printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n",
+ printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
fcoe_ethdrv_put(netdev);
rc = -EIO;
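
The fcoe.c hunks above also cover CPU hotplug: fcoe_percpu_thread_destroy() splices any skbs still queued on the dying CPU's receive list onto another online CPU's list before the thread goes away, so no pending frames are dropped. Below is a minimal userspace sketch of that hand-off pattern, not the kernel code; it uses pthread mutexes and purely illustrative names (struct worker, migrate_pending), and it takes both locks for generality even though the kernel path only needs the target's lock once the source thread has already stopped.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int payload;
};

struct worker {
	pthread_mutex_t lock;
	struct item *head, *tail;	/* FIFO of pending items */
};

static struct worker cpu_a = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
static struct worker cpu_b = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

static void enqueue(struct worker *w, int payload)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		return;
	it->payload = payload;
	it->next = NULL;
	pthread_mutex_lock(&w->lock);
	if (w->tail)
		w->tail->next = it;
	else
		w->head = it;
	w->tail = it;
	pthread_mutex_unlock(&w->lock);
}

/* Splice everything still queued on 'dying' onto 'target' so nothing is lost. */
static void migrate_pending(struct worker *dying, struct worker *target)
{
	pthread_mutex_lock(&dying->lock);
	pthread_mutex_lock(&target->lock);
	if (dying->head) {
		if (target->tail)
			target->tail->next = dying->head;
		else
			target->head = dying->head;
		target->tail = dying->tail;
		dying->head = dying->tail = NULL;
	}
	pthread_mutex_unlock(&target->lock);
	pthread_mutex_unlock(&dying->lock);
}

int main(void)
{
	struct item *it;

	enqueue(&cpu_a, 1);
	enqueue(&cpu_a, 2);
	migrate_pending(&cpu_a, &cpu_b);	/* cpu_a's thread is going away */
	while ((it = cpu_b.head) != NULL) {
		cpu_b.head = it->next;
		printf("migrated item %d\n", it->payload);
		free(it);
	}
	return 0;
}
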
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index a1eb8c1988b0..0d724fa0898f 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,6 +40,30 @@
#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+#define FCOE_LOGGING 0x01 /* General logging, not categorized */
+#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
+
+#define FCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(fcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define FCOE_DBG(fmt, args...) \
+ FCOE_CHECK_LOGGING(FCOE_LOGGING, \
+ printk(KERN_INFO "fcoe: " fmt, ##args);)
+
+#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
+ FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
+ printk(KERN_INFO "fcoe: %s" fmt, \
+ netdev->name, ##args);)
+
/*
* this percpu struct for fcoe
*/
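
The fcoe.h hunk above replaces the single debug flag with a per-category bitmask exposed as a module parameter, so FCOE_DBG and FCOE_NETDEV_DBG only evaluate their printk when the matching bit is set in fcoe_debug_logging. Below is a minimal userspace sketch of the same gating pattern; the names (CHECK_LOGGING, LOG_NETDEV, "demo:") are illustrative and it is not the kernel macro set itself.

#include <stdio.h>

static unsigned int debug_logging;	/* stands in for the module parameter */

#define LOG_GENERAL	0x01
#define LOG_NETDEV	0x02

#define CHECK_LOGGING(level, cmd)		\
do {						\
	if (debug_logging & (level))		\
		cmd;				\
} while (0)

#define DBG(fmt, args...) \
	CHECK_LOGGING(LOG_GENERAL, printf("demo: " fmt, ##args))
#define NETDEV_DBG(name, fmt, args...) \
	CHECK_LOGGING(LOG_NETDEV, printf("demo: %s: " fmt, (name), ##args))

int main(void)
{
	DBG("suppressed, mask is 0\n");			/* not printed */

	debug_logging = LOG_NETDEV;			/* e.g. debug_logging=2 */
	NETDEV_DBG("eth0", "printed, netdev bit set\n");
	return 0;
}

Since the parameter above is registered with S_IRUGO|S_IWUSR, the mask should also be adjustable at runtime (for example through /sys/module/fcoe/parameters/debug_logging) in addition to being set at module load time.
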
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 2f5bc7fd3fa9..f544340d318b 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -56,15 +56,28 @@ static void fcoe_ctlr_recv_work(struct work_struct *);
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
-static u32 fcoe_ctlr_debug; /* 1 for basic, 2 for noisy debug */
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-#define FIP_DBG_LVL(level, fmt, args...) \
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(libfcoe_debug_logging & LEVEL)) \
do { \
- if (fcoe_ctlr_debug >= (level)) \
- FC_DBG(fmt, ##args); \
- } while (0)
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define LIBFCOE_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
+ printk(KERN_INFO "libfcoe: " fmt, ##args);)
-#define FIP_DBG(fmt, args...) FIP_DBG_LVL(1, fmt, ##args)
+#define LIBFCOE_FIP_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
+ printk(KERN_INFO "fip: " fmt, ##args);)
/*
* Return non-zero if FCF fcoe_size has been validated.
@@ -243,7 +256,7 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
fip->last_link = 1;
fip->link = 1;
spin_unlock_bh(&fip->lock);
- FIP_DBG("%s", "setting AUTO mode.\n");
+ LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n");
fc_linkup(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
} else
@@ -614,7 +627,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
if (!is_valid_ether_addr(fcf->fcf_mac)) {
- FIP_DBG("invalid MAC addr in FIP adv\n");
+ LIBFCOE_FIP_DBG("Invalid MAC address "
+ "in FIP adv\n");
return -EINVAL;
}
break;
@@ -647,8 +661,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
case FIP_DT_LOGO:
case FIP_DT_ELP:
default:
- FIP_DBG("unexpected descriptor type %x in FIP adv\n",
- desc->fip_dtype);
+ LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+ "in FIP adv\n", desc->fip_dtype);
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
return -EINVAL;
@@ -664,8 +678,8 @@ static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
return 0;
len_err:
- FIP_DBG("FIP length error in descriptor type %x len %zu\n",
- desc->fip_dtype, dlen);
+ LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+ desc->fip_dtype, dlen);
return -EINVAL;
}
@@ -728,9 +742,10 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
- FIP_DBG_LVL(found ? 2 : 1, "%s FCF for fab %llx map %x val %d\n",
- found ? "old" : "new",
- fcf->fabric_name, fcf->fc_map, mtu_valid);
+ if (!found) {
+ LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n",
+ fcf->fabric_name, fcf->fc_map, mtu_valid);
+ }
/*
* If this advertisement is not solicited and our max receive size
@@ -807,7 +822,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
if (!is_valid_ether_addr(granted_mac)) {
- FIP_DBG("invalid MAC addrs in FIP ELS\n");
+ LIBFCOE_FIP_DBG("Invalid MAC address "
+ "in FIP ELS\n");
goto drop;
}
break;
@@ -825,8 +841,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
els_dtype = desc->fip_dtype;
break;
default:
- FIP_DBG("unexpected descriptor type %x "
- "in FIP adv\n", desc->fip_dtype);
+ LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+ "in FIP adv\n", desc->fip_dtype);
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
goto drop;
@@ -867,8 +883,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
return;
len_err:
- FIP_DBG("FIP length error in descriptor type %x len %zu\n",
- desc->fip_dtype, dlen);
+ LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+ desc->fip_dtype, dlen);
drop:
kfree_skb(skb);
}
@@ -894,7 +910,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
struct fc_lport *lp = fip->lp;
u32 desc_mask;
- FIP_DBG("Clear Virtual Link received\n");
+ LIBFCOE_FIP_DBG("Clear Virtual Link received\n");
if (!fcf)
return;
if (!fcf || !fc_host_port_id(lp->host))
@@ -952,9 +968,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* reset only if all required descriptors were present and valid.
*/
if (desc_mask) {
- FIP_DBG("missing descriptors mask %x\n", desc_mask);
+ LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask);
} else {
- FIP_DBG("performing Clear Virtual Link\n");
+ LIBFCOE_FIP_DBG("performing Clear Virtual Link\n");
fcoe_ctlr_reset(fip, FIP_ST_ENABLED);
}
}
@@ -1002,10 +1018,6 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
- FIP_DBG_LVL(2, "ver %x op %x/%x dl %x fl %x\n",
- FIP_VER_DECAPS(fiph->fip_ver), op, sub,
- ntohs(fiph->fip_dl_len), ntohs(fiph->fip_flags));
-
if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
goto drop;
if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
@@ -1017,7 +1029,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
fip->map_dest = 0;
fip->state = FIP_ST_ENABLED;
state = FIP_ST_ENABLED;
- FIP_DBG("using FIP mode\n");
+ LIBFCOE_FIP_DBG("Using FIP mode\n");
}
spin_unlock_bh(&fip->lock);
if (state != FIP_ST_ENABLED)
@@ -1052,14 +1064,15 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
struct fcoe_fcf *best = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
- FIP_DBG("consider FCF for fab %llx VFID %d map %x val %d\n",
- fcf->fabric_name, fcf->vfid,
- fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
+ LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x "
+ "val %d\n", fcf->fabric_name, fcf->vfid,
+ fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
if (!fcoe_ctlr_fcf_usable(fcf)) {
- FIP_DBG("FCF for fab %llx map %x %svalid %savailable\n",
- fcf->fabric_name, fcf->fc_map,
- (fcf->flags & FIP_FL_SOL) ? "" : "in",
- (fcf->flags & FIP_FL_AVAIL) ? "" : "un");
+ LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid "
+ "%savailable\n", fcf->fabric_name,
+ fcf->fc_map, (fcf->flags & FIP_FL_SOL)
+ ? "" : "in", (fcf->flags & FIP_FL_AVAIL)
+ ? "" : "un");
continue;
}
if (!best) {
@@ -1069,7 +1082,8 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
if (fcf->fabric_name != best->fabric_name ||
fcf->vfid != best->vfid ||
fcf->fc_map != best->fc_map) {
- FIP_DBG("conflicting fabric, VFID, or FC-MAP\n");
+ LIBFCOE_FIP_DBG("Conflicting fabric, VFID, "
+ "or FC-MAP\n");
return;
}
if (fcf->pri < best->pri)
@@ -1113,7 +1127,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
if (sel != fcf) {
fcf = sel; /* the old FCF may have been freed */
if (sel) {
- printk(KERN_INFO "host%d: FIP selected "
+ printk(KERN_INFO "libfcoe: host%d: FIP selected "
"Fibre-Channel Forwarder MAC %s\n",
fip->lp->host->host_no,
print_mac(buf, sel->fcf_mac));
@@ -1123,7 +1137,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
fip->ctlr_ka_time = jiffies + sel->fka_period;
fip->link = 1;
} else {
- printk(KERN_NOTICE "host%d: "
+ printk(KERN_NOTICE "libfcoe: host%d: "
"FIP Fibre-Channel Forwarder timed out. "
"Starting FCF discovery.\n",
fip->lp->host->host_no);
@@ -1247,7 +1261,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
return -EINVAL;
}
fip->state = FIP_ST_NON_FIP;
- FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
+ LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
/*
* FLOGI accepted.
@@ -1276,7 +1290,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
memcpy(fip->dest_addr, sa, ETH_ALEN);
fip->map_dest = 0;
if (fip->state == FIP_ST_NON_FIP)
- FIP_DBG("received FLOGI REQ, "
+ LIBFCOE_FIP_DBG("received FLOGI REQ, "
"using non-FIP mode\n");
fip->state = FIP_ST_NON_FIP;
}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index a84072865fc2..2c266c01dc5a 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
* limitation for the device. Try 40-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 32-bit DMA "
@@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 40-bit DMA "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index eabf36502856..bfc996971b81 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct vnic_wq_copy *wq,
struct fnic_io_req *io_req,
struct scsi_cmnd *sc,
- u32 sg_count)
+ int sg_count)
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
@@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
char msg[2];
if (sg_count) {
- BUG_ON(sg_count < 0);
- BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
-
/* For each SGE, create a device desc entry */
desc = io_req->sgl_list;
for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
@@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
struct fnic *fnic;
struct vnic_wq_copy *wq;
int ret;
- u32 sg_count;
+ int sg_count;
unsigned long flags;
unsigned long ptr;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 89d41a424b33..5fd2da494d08 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -40,7 +40,7 @@
#include "scsi_logging.h"
-static int scsi_host_next_hn; /* host_no for next new host */
+static atomic_t scsi_host_next_hn; /* host_no for next new host */
static void scsi_host_cls_release(struct device *dev)
@@ -333,7 +333,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
mutex_init(&shost->scan_mutex);
- shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */
+ /*
+ * subtract one because we increment first then return, but we need to
+ * know what the next host number was before increment
+ */
+ shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
shost->dma_channel = 0xff;
/* These three are default values which can be overridden */
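
The hosts.c hunk turns the racy scsi_host_next_hn++ into an atomic operation: atomic_inc_return() yields the incremented value, so subtracting one gives each caller the pre-increment host number exactly once. A userspace sketch of the same idea using C11 atomics (illustrative names, not the kernel atomic_t API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint next_host_no;

/* Equivalent of atomic_inc_return(&scsi_host_next_hn) - 1: each caller
 * sees the pre-increment value, and no two callers get the same one. */
static unsigned int alloc_host_no(void)
{
	return atomic_fetch_add(&next_host_no, 1);
}

int main(void)
{
	printf("host%u\n", alloc_host_no());	/* host0 */
	printf("host%u\n", alloc_host_no());	/* host1 */
	return 0;
}
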
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b4b805e8d7db..166d96450a0e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2254,10 +2254,13 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
continue;
if (crq->node_name && tgt->ids.node_name != crq->node_name)
continue;
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO)
+ tgt->logo_rcvd = 1;
+ if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_reinit_host(vhost);
+ }
}
-
- ibmvfc_reinit_host(vhost);
break;
case IBMVFC_AE_LINK_DOWN:
case IBMVFC_AE_ADAPTER_FAILED:
@@ -2783,27 +2786,27 @@ static void ibmvfc_tasklet(void *data)
spin_lock_irqsave(vhost->host->host_lock, flags);
while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
- ibmvfc_handle_crq(crq, vhost);
- crq->valid = 0;
- }
-
/* Pull all the valid messages off the async CRQ */
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
}
- vio_enable_interrupts(vdev);
- if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
- vio_disable_interrupts(vdev);
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
- } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+ }
+
+ vio_enable_interrupts(vdev);
+ if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvfc_handle_crq(crq, vhost);
+ crq->valid = 0;
} else
done = 1;
}
@@ -2927,7 +2930,11 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
- if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED)
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else if (tgt->logo_rcvd)
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
@@ -3054,6 +3061,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
+ tgt->logo_rcvd = 0;
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index c2668d7d67f5..007fa1c9ef14 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -605,6 +605,7 @@ struct ibmvfc_target {
int need_login;
int add_rport;
int init_retries;
+ int logo_rcvd;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 869a11bdccbd..9928704e235f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
MAX_INDIRECT_BUFS);
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
+
+ if (hostdata->madapter_info.os_type == 3) {
+ enable_fast_fail(hostdata);
+ return;
+ }
}
- enable_fast_fail(hostdata);
+ send_srp_login(hostdata);
}
/**
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0f8bc772b112..5f045505a1f4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -131,13 +131,13 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds [] = {
@@ -7367,6 +7367,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
ioa_cfg->sdt_state = INACTIVE;
if (ipr_enable_cache)
ioa_cfg->cache_state = CACHE_ENABLED;
@@ -7398,25 +7399,108 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
}
/**
- * ipr_get_chip_cfg - Find adapter chip configuration
+ * ipr_get_chip_info - Find adapter chip information
* @dev_id: PCI device id struct
*
* Return value:
- * ptr to chip config on success / NULL on failure
+ * ptr to chip information on success / NULL on failure
**/
-static const struct ipr_chip_cfg_t * __devinit
-ipr_get_chip_cfg(const struct pci_device_id *dev_id)
+static const struct ipr_chip_t * __devinit
+ipr_get_chip_info(const struct pci_device_id *dev_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
if (ipr_chip[i].vendor == dev_id->vendor &&
ipr_chip[i].device == dev_id->device)
- return ipr_chip[i].cfg;
+ return &ipr_chip[i];
return NULL;
}
/**
+ * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
+ * @irq: interrupt number
+ * @devp: pointer to the ioa config struct
+ *
+ * Description: Simply set the msi_received flag to 1 indicating that
+ * Message Signaled Interrupts are supported.
+ *
+ * Return value:
+ * IRQ_HANDLED
+ **/
+static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
+ unsigned long lock_flags = 0;
+ irqreturn_t rc = IRQ_HANDLED;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ ioa_cfg->msi_received = 1;
+ wake_up(&ioa_cfg->msi_wait_q);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
+ * @ioa_cfg: ioa config struct
+ * @pdev: PCI device struct
+ *
+ * Description: The return value from pci_enable_msi() cannot always be
+ * trusted. This routine sets up and initiates a test interrupt to determine
+ * if the interrupt is received via the ipr_test_intr() service routine.
+ * If the test fails, the driver will fall back to LSI.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
+ struct pci_dev *pdev)
+{
+ int rc;
+ volatile u32 int_reg;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
+ ioa_cfg->msi_received = 0;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ return rc;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (!ioa_cfg->msi_received) {
+ /* MSI test failed */
+ dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
+ rc = -EOPNOTSUPP;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "MSI test succeeded.\n");
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ free_irq(pdev->irq, ioa_cfg);
+
+ LEAVE;
+
+ return rc;
+}
+
+/**
* ipr_probe_ioa - Allocates memory and does first stage of initialization
* @pdev: PCI device struct
* @dev_id: PCI device id struct
@@ -7441,11 +7525,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto out;
}
- if (!(rc = pci_enable_msi(pdev)))
- dev_info(&pdev->dev, "MSI enabled\n");
- else if (ipr_debug)
- dev_info(&pdev->dev, "Cannot enable MSI\n");
-
dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
@@ -7461,14 +7540,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
sata_port_info.flags, &ipr_sata_ops);
- ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
+ ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
- if (!ioa_cfg->chip_cfg) {
+ if (!ioa_cfg->ipr_chip) {
dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
dev_id->vendor, dev_id->device);
goto out_scsi_host_put;
}
+ ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+
if (ipr_transop_timeout)
ioa_cfg->transop_timeout = ipr_transop_timeout;
else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
@@ -7519,6 +7600,18 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto cleanup_nomem;
}
+ /* Enable MSI style interrupts if they are supported. */
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+ rc = ipr_test_msi(ioa_cfg, pdev);
+ if (rc == -EOPNOTSUPP)
+ pci_disable_msi(pdev);
+ else if (rc)
+ goto out_msi_disable;
+ else
+ dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
@@ -7556,7 +7649,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->ioa_unit_checked = 1;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
+ rc = request_irq(pdev->irq, ipr_isr,
+ ioa_cfg->msi_received ? 0 : IRQF_SHARED,
+ IPR_NAME, ioa_cfg);
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
@@ -7583,12 +7678,13 @@ cleanup_nolog:
ipr_free_mem(ioa_cfg);
cleanup_nomem:
iounmap(ipr_regs);
+out_msi_disable:
+ pci_disable_msi(pdev);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
scsi_host_put(host);
out_disable:
- pci_disable_msi(pdev);
pci_disable_device(pdev);
goto out;
}
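
ipr_test_msi() above arms msi_wait_q, fires a test interrupt, and uses wait_event_timeout() to check whether ipr_test_intr() actually ran before trusting MSI; if the flag never appears, the driver falls back to LSI. The following is a userspace analogue of that wait-for-a-flag-with-timeout-then-fall-back pattern, built on pthreads with purely illustrative names (handler_thread, test_event); it is a sketch of the pattern, not the driver's code.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int event_received;

/* Stands in for ipr_test_intr(): runs only if the test event actually fires. */
static void *handler_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	event_received = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Returns 0 if the handler ran in time, -1 to fall back to the legacy path. */
static int test_event(void)
{
	pthread_t tid;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;				/* roughly the HZ wait above */

	pthread_create(&tid, NULL, handler_thread, NULL);	/* "fire" the test event */

	pthread_mutex_lock(&lock);
	while (!event_received) {
		if (pthread_cond_timedwait(&cond, &lock, &deadline))
			break;				/* timed out */
	}
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);

	return event_received ? 0 : -1;
}

int main(void)
{
	if (test_event())
		printf("test failed, falling back\n");
	else
		printf("test succeeded\n");
	return 0;
}
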
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 79a3ae4fb2c7..4b63dd6b1c81 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.4.2"
-#define IPR_DRIVER_DATE "(January 21, 2009)"
+#define IPR_DRIVER_VERSION "2.4.3"
+#define IPR_DRIVER_DATE "(June 10, 2009)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1025,6 +1025,9 @@ struct ipr_chip_cfg_t {
struct ipr_chip_t {
u16 vendor;
u16 device;
+ u16 intr_type;
+#define IPR_USE_LSI 0x00
+#define IPR_USE_MSI 0x01
const struct ipr_chip_cfg_t *cfg;
};
@@ -1094,6 +1097,7 @@ struct ipr_ioa_cfg {
u8 needs_hard_reset:1;
u8 dual_raid:1;
u8 needs_warm_reset:1;
+ u8 msi_received:1;
u8 revid;
@@ -1159,6 +1163,7 @@ struct ipr_ioa_cfg {
unsigned int transop_timeout;
const struct ipr_chip_cfg_t *chip_cfg;
+ const struct ipr_chip_t *ipr_chip;
void __iomem *hdw_dma_regs; /* iomapped PCI memory space */
unsigned long hdw_dma_regs_pci; /* raw PCI memory space */
@@ -1179,6 +1184,7 @@ struct ipr_ioa_cfg {
struct work_struct work_q;
wait_queue_head_t reset_wait_q;
+ wait_queue_head_t msi_wait_q;
struct ipr_dump *dump;
enum ipr_sdt_state sdt_state;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b7c092d63bbe..518dbd91df85 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -253,8 +253,6 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
if (r < 0) {
iscsi_tcp_segment_unmap(segment);
- if (copied || r == -EAGAIN)
- break;
return r;
}
copied += r;
@@ -275,11 +273,17 @@ static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
while (1) {
rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
- if (rc < 0) {
+ /*
+ * We may not have been able to send data because the conn
+ * is getting stopped. libiscsi will know, so propagate the error
+ * for it to do the right thing.
+ */
+ if (rc == -EAGAIN)
+ return rc;
+ else if (rc < 0) {
rc = ISCSI_ERR_XMIT_FAILED;
goto error;
- }
- if (rc == 0)
+ } else if (rc == 0)
break;
consumed += rc;
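
The iscsi_tcp hunks above stop converting -EAGAIN into a fatal transmit error: the segment loop now returns it unchanged so libiscsi can requeue the transmission instead of failing the connection. A small sketch of that calling convention follows; the names and the -EIO stand-in are illustrative, not the libiscsi API.

#include <errno.h>
#include <stdio.h>

/* Pretend low-level send: 0 = segment done, <0 = error. */
static int xmit_segment(int attempt)
{
	if (attempt == 0)
		return -EAGAIN;		/* socket buffer full, try again later */
	return 0;
}

static int xmit(int attempt)
{
	int rc;

	for (;;) {
		rc = xmit_segment(attempt);
		if (rc == -EAGAIN)
			return rc;	/* propagate: caller requeues the work */
		else if (rc < 0)
			return -EIO;	/* hard failure: caller tears the conn down */
		else if (rc == 0)
			break;		/* segment fully sent */
	}
	return 0;
}

int main(void)
{
	if (xmit(0) == -EAGAIN)
		printf("would requeue and retry\n");
	if (xmit(1) == 0)
		printf("sent\n");
	return 0;
}
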
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 4c880656990b..6fabf66972b9 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -45,14 +45,6 @@
#define FC_DISC_DELAY 3
-static int fc_disc_debug;
-
-#define FC_DEBUG_DISC(fmt...) \
- do { \
- if (fc_disc_debug) \
- FC_DBG(fmt); \
- } while (0)
-
static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
@@ -137,8 +129,8 @@ static void fc_disc_rport_callback(struct fc_lport *lport,
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_disc *disc = &lport->disc;
- FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
- rport->port_id);
+ FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
+ rport->port_id);
switch (event) {
case RPORT_EV_CREATED:
@@ -191,8 +183,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
lport = disc->lport;
- FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Received an RSCN event\n");
/* make sure the frame contains an RSCN message */
rp = fc_frame_payload_get(fp, sizeof(*rp));
@@ -225,8 +216,8 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
*/
switch (fmt) {
case ELS_ADDR_FMT_PORT:
- FC_DEBUG_DISC("Port address format for port (%6x)\n",
- ntoh24(pp->rscn_fid));
+ FC_DISC_DBG(disc, "Port address format for port "
+ "(%6x)\n", ntoh24(pp->rscn_fid));
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
redisc = 1;
@@ -243,19 +234,19 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
case ELS_ADDR_FMT_DOM:
case ELS_ADDR_FMT_FAB:
default:
- FC_DEBUG_DISC("Address format is (%d)\n", fmt);
+ FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
redisc = 1;
break;
}
}
lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
if (redisc) {
- FC_DEBUG_DISC("RSCN received: rediscovering\n");
+ FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
fc_disc_restart(disc);
} else {
- FC_DEBUG_DISC("RSCN received: not rediscovering. "
- "redisc %d state %d in_prog %d\n",
- redisc, lport->state, disc->pending);
+ FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
+ "redisc %d state %d in_prog %d\n",
+ redisc, lport->state, disc->pending);
list_for_each_entry_safe(dp, next, &disc_ports, peers) {
list_del(&dp->peers);
rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
@@ -270,7 +261,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
fc_frame_free(fp);
return;
reject:
- FC_DEBUG_DISC("Received a bad RSCN frame\n");
+ FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
rjt_data.fp = NULL;
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
@@ -302,7 +293,8 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
mutex_unlock(&disc->disc_mutex);
break;
default:
- FC_DBG("Received an unsupported request. opcode (%x)\n", op);
+ FC_DISC_DBG(disc, "Received an unsupported request, "
+ "the opcode is (%x)\n", op);
break;
}
}
@@ -320,12 +312,10 @@ static void fc_disc_restart(struct fc_disc *disc)
struct fc_rport_libfc_priv *rdata, *next;
struct fc_lport *lport = disc->lport;
- FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Restarting discovery\n");
list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
- FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
@@ -485,8 +475,7 @@ static void fc_disc_done(struct fc_disc *disc)
struct fc_lport *lport = disc->lport;
enum fc_disc_event event;
- FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Discovery complete\n");
event = disc->event;
disc->event = DISC_EV_NONE;
@@ -510,10 +499,10 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
struct fc_lport *lport = disc->lport;
unsigned long delay = 0;
- if (fc_disc_debug)
- FC_DBG("Error %ld, retries %d/%d\n",
- PTR_ERR(fp), disc->retry_count,
- FC_DISC_RETRY_LIMIT);
+
+ FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
+ PTR_ERR(fp), disc->retry_count,
+ FC_DISC_RETRY_LIMIT);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
/*
@@ -649,9 +638,9 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
&disc->rogue_rports);
lport->tt.rport_login(rport);
} else
- FC_DBG("Failed to allocate memory for "
- "the newly discovered port (%6x)\n",
- dp.ids.port_id);
+ printk(KERN_WARNING "libfc: Failed to allocate "
+ "memory for the newly discovered port "
+ "(%6x)\n", dp.ids.port_id);
}
if (np->fp_flags & FC_NS_FID_LAST) {
@@ -671,9 +660,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
*/
if (error == 0 && len > 0 && len < sizeof(*np)) {
if (np != &disc->partial_buf) {
- FC_DEBUG_DISC("Partial buffer remains "
- "for discovery by (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Partial buffer remains "
+ "for discovery\n");
memcpy(&disc->partial_buf, np, len);
}
disc->buf_len = (unsigned char) len;
@@ -721,8 +709,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
int error;
mutex_lock(&disc->disc_mutex);
- FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
- fc_host_port_id(disc->lport->host));
+ FC_DISC_DBG(disc, "Received a GPN_FT response\n");
if (IS_ERR(fp)) {
fc_disc_error(disc, fp);
@@ -738,30 +725,30 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
disc->seq_count == 0) {
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp) {
- FC_DBG("GPN_FT response too short, len %d\n",
- fr_len(fp));
+ FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
+ fr_len(fp));
} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
/* Accepted, parse the response. */
buf = cp + 1;
len -= sizeof(*cp);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
- FC_DBG("GPN_FT rejected reason %x exp %x "
- "(check zoning)\n", cp->ct_reason,
- cp->ct_explan);
+ FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
+ "(check zoning)\n", cp->ct_reason,
+ cp->ct_explan);
disc->event = DISC_EV_FAILED;
fc_disc_done(disc);
} else {
- FC_DBG("GPN_FT unexpected response code %x\n",
- ntohs(cp->ct_cmd));
+ FC_DISC_DBG(disc, "GPN_FT unexpected response code "
+ "%x\n", ntohs(cp->ct_cmd));
}
} else if (fr_sof(fp) == FC_SOF_N3 &&
seq_cnt == disc->seq_count) {
buf = fh + 1;
} else {
- FC_DBG("GPN_FT unexpected frame - out of sequence? "
- "seq_cnt %x expected %x sof %x eof %x\n",
- seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+ FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
+ "seq_cnt %x expected %x sof %x eof %x\n",
+ seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
}
if (buf) {
error = fc_disc_gpn_ft_parse(disc, buf, len);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 7af9bceb8aa9..2bc22be5f849 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -32,18 +32,7 @@
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
-/*
- * fc_exch_debug can be set in debugger or at compile time to get more logs.
- */
-static int fc_exch_debug;
-
-#define FC_DEBUG_EXCH(fmt...) \
- do { \
- if (fc_exch_debug) \
- FC_DBG(fmt); \
- } while (0)
-
-static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
+static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
/*
* Structure and function definitions for managing Fibre Channel Exchanges
@@ -333,8 +322,8 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
return;
- FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
- ep->xid);
+ FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n");
+
if (schedule_delayed_work(&ep->timeout_work,
msecs_to_jiffies(timer_msec)))
fc_exch_hold(ep); /* hold for timer */
@@ -545,7 +534,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
/* alloc a new xid */
xid = fc_em_alloc_xid(mp, fp);
if (!xid) {
- printk(KERN_ERR "fc_em_alloc_xid() failed\n");
+ printk(KERN_WARNING "libfc: Failed to allocate an exhange\n");
goto err;
}
}
@@ -820,8 +809,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
struct fc_exch *ep = fc_seq_exch(sp);
sp = fc_seq_alloc(ep, ep->seq_id++);
- FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
- ep->xid, ep->f_ctl, sp->id);
+ FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
+ ep->f_ctl, sp->id);
return sp;
}
/*
@@ -901,7 +890,7 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
fc_exch_els_rec(sp, els_data->fp);
break;
default:
- FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
+ FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
}
}
EXPORT_SYMBOL(fc_seq_els_rsp_send);
@@ -1134,7 +1123,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
lp->tt.lport_recv(lp, sp, fp);
fc_exch_release(ep); /* release from lookup */
} else {
- FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
+ FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
fc_frame_free(fp);
}
}
@@ -1242,10 +1231,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
if (!sp) {
atomic_inc(&mp->stats.xid_not_found);
- FC_DEBUG_EXCH("seq lookup failed\n");
+ FC_EM_DBG(mp, "seq lookup failed\n");
} else {
atomic_inc(&mp->stats.non_bls_resp);
- FC_DEBUG_EXCH("non-BLS response to sequence");
+ FC_EM_DBG(mp, "non-BLS response to sequence");
}
fc_frame_free(fp);
}
@@ -1266,8 +1255,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
int rc = 1, has_rec = 0;
fh = fc_frame_header_get(fp);
- FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
- fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
+ FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
if (cancel_delayed_work_sync(&ep->timeout_work))
fc_exch_release(ep); /* release from pending timer hold */
@@ -1359,9 +1348,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
case FC_RCTL_ACK_0:
break;
default:
- FC_DEBUG_EXCH("BLS rctl %x - %s received",
- fh->fh_r_ctl,
- fc_exch_rctl_name(fh->fh_r_ctl));
+ FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+ fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
break;
}
fc_frame_free(fp);
@@ -1599,7 +1588,8 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
goto cleanup;
- FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
+ FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
+ "frame error %d\n", err);
return;
}
@@ -1608,12 +1598,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
switch (op) {
case ELS_LS_RJT:
- FC_DBG("LS_RJT for RRQ");
+ FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
/* fall through */
case ELS_LS_ACC:
goto cleanup;
default:
- FC_DBG("unexpected response op %x for RRQ", op);
+ FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
+ "for RRQ", op);
return;
}
@@ -1740,8 +1731,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
size_t len;
if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
- FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n",
- min_xid, max_xid);
+ FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
+ min_xid, max_xid);
return NULL;
}
@@ -1878,7 +1869,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
/* lport lock ? */
if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
- FC_DBG("fc_lport or EM is not allocated and configured");
+ FC_LPORT_DBG(lp, "Receiving frames for an lport that "
+ "has not been initialized correctly\n");
fc_frame_free(fp);
return;
}
@@ -1904,7 +1896,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
fc_exch_recv_req(lp, mp, fp);
break;
default:
- FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+ FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
fc_frame_free(fp);
break;
}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index ad8b747837b0..e303e0d12c4b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -43,13 +43,9 @@ MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL v2");
-static int fc_fcp_debug;
-
-#define FC_DEBUG_FCP(fmt...) \
- do { \
- if (fc_fcp_debug) \
- FC_DBG(fmt); \
- } while (0)
+unsigned int fc_debug_logging;
+module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
static struct kmem_cache *scsi_pkt_cachep;
@@ -347,8 +343,8 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
fc_frame_crc_check(fp))
goto crc_err;
- FC_DEBUG_FCP("data received past end. len %zx offset %zx "
- "data_len %x\n", len, offset, fsp->data_len);
+ FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
+ "data_len %x\n", len, offset, fsp->data_len);
fc_fcp_retry_cmd(fsp);
return;
}
@@ -411,7 +407,8 @@ crc_err:
stats->ErrorFrames++;
/* FIXME - per cpu count, not total count! */
if (stats->InvalidCRCCount++ < 5)
- printk(KERN_WARNING "CRC error on data frame for port (%6x)\n",
+ printk(KERN_WARNING "libfc: CRC error on data "
+ "frame for port (%6x)\n",
fc_host_port_id(lp->host));
/*
* Assume the frame is total garbage.
@@ -475,14 +472,14 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
WARN_ON(seq_blen <= 0);
if (unlikely(offset + seq_blen > fsp->data_len)) {
/* this should never happen */
- FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
- seq_blen, offset);
+ FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
+ "offset %zx\n", seq_blen, offset);
fc_fcp_send_abort(fsp);
return 0;
} else if (offset != fsp->xfer_len) {
/* Out of Order Data Request - no problem, but unexpected. */
- FC_DEBUG_FCP("xfer-ready non-contiguous. "
- "seq_blen %zx offset %zx\n", seq_blen, offset);
+ FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
+ "seq_blen %zx offset %zx\n", seq_blen, offset);
}
/*
@@ -493,7 +490,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
t_blen = fsp->max_payload;
if (lp->seq_offload) {
t_blen = min(seq_blen, (size_t)lp->lso_max);
- FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
+ FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
fsp, seq_blen, lp->lso_max, t_blen);
}
@@ -694,7 +691,7 @@ static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
if (!can_queue)
can_queue = 1;
lp->host->can_queue = can_queue;
- shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
+ shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
"Reducing can_queue to %d.\n", can_queue);
done:
spin_unlock_irqrestore(lp->host->host_lock, flags);
@@ -768,7 +765,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fc_fcp_resp(fsp, fp);
} else {
- FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
+ FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
}
unlock:
fc_fcp_unlock_pkt(fsp);
@@ -877,17 +874,17 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
}
fsp->status_code = FC_DATA_OVRRUN;
- FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
- "data len %x\n",
- fsp->rport->port_id,
- fsp->xfer_len, expected_len, fsp->data_len);
+ FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
}
fc_fcp_complete_locked(fsp);
return;
len_err:
- FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
- flags, fr_len(fp), respl, snsl);
+ FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
+ "snsl %u\n", flags, fr_len(fp), respl, snsl);
err:
fsp->status_code = FC_ERROR;
fc_fcp_complete_locked(fsp);
@@ -1107,13 +1104,11 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (fc_fcp_lock_pkt(fsp))
return;
- switch (error) {
- case -FC_EX_CLOSED:
+ if (error == -FC_EX_CLOSED) {
fc_fcp_retry_cmd(fsp);
goto unlock;
- default:
- FC_DBG("unknown error %ld\n", PTR_ERR(fp));
}
+
/*
* clear abort pending, because the lower layer
* decided to force completion.
@@ -1145,10 +1140,10 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
fsp->wait_for_comp = 0;
if (!rc) {
- FC_DBG("target abort cmd failed\n");
+ FC_FCP_DBG(fsp, "target abort cmd failed\n");
rc = FAILED;
} else if (fsp->state & FC_SRB_ABORTED) {
- FC_DBG("target abort cmd passed\n");
+ FC_FCP_DBG(fsp, "target abort cmd passed\n");
rc = SUCCESS;
fc_fcp_complete_locked(fsp);
}
@@ -1213,7 +1208,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
spin_unlock_bh(&fsp->scsi_pkt_lock);
if (!rc) {
- FC_DBG("lun reset failed\n");
+ FC_SCSI_DBG(lp, "lun reset failed\n");
return FAILED;
}
@@ -1221,7 +1216,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
if (fsp->cdb_status != FCP_TMF_CMPL)
return FAILED;
- FC_DBG("lun reset to lun %u completed\n", lun);
+ FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun);
fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
return SUCCESS;
}
@@ -1388,13 +1383,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
switch (rjt->er_reason) {
default:
- FC_DEBUG_FCP("device %x unexpected REC reject "
- "reason %d expl %d\n",
- fsp->rport->port_id, rjt->er_reason,
- rjt->er_explan);
+ FC_FCP_DBG(fsp, "device %x unexpected REC reject "
+ "reason %d expl %d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
/* fall through */
case ELS_RJT_UNSUP:
- FC_DEBUG_FCP("device does not support REC\n");
+ FC_FCP_DBG(fsp, "device does not support REC\n");
rp = fsp->rport->dd_data;
/*
* if we do not support RECs or got some bogus
@@ -1514,8 +1509,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
break;
default:
- FC_DBG("REC %p fid %x error unexpected error %d\n",
- fsp, fsp->rport->port_id, error);
+ FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n",
+ fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
/* fall through */
@@ -1524,9 +1519,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_DBG("REC fid %x error error %d retry %d/%d\n",
- fsp->rport->port_id, error, fsp->recov_retry,
- FC_MAX_RECOV_RETRY);
+ FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n",
+ fsp->rport->port_id, error, fsp->recov_retry,
+ FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
@@ -2011,9 +2006,11 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
if (lp->state != LPORT_ST_READY)
return rc;
+ FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id);
+
fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
if (fsp == NULL) {
- FC_DBG("could not allocate scsi_pkt\n");
+ printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
sc_cmd->result = DID_NO_CONNECT << 16;
goto out;
}
@@ -2048,17 +2045,21 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
struct fc_lport *lp = shost_priv(shost);
unsigned long wait_tmo;
+ FC_SCSI_DBG(lp, "Resetting host\n");
+
lp->tt.lport_reset(lp);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
msleep(1000);
if (fc_fcp_lport_queue_ready(lp)) {
- shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
+ shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
+ "on port (%6x)\n", fc_host_port_id(lp->host));
return SUCCESS;
} else {
- shost_printk(KERN_INFO, shost, "Host reset failed. "
- "lport not ready.\n");
+ shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
+ "port (%6x) is not ready.\n",
+ fc_host_port_id(lp->host));
return FAILED;
}
}
@@ -2117,7 +2118,8 @@ void fc_fcp_destroy(struct fc_lport *lp)
struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
if (!list_empty(&si->scsi_pkt_queue))
- printk(KERN_ERR "Leaked scsi packets.\n");
+ printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
+ "port (%6x)\n", fc_host_port_id(lp->host));
mempool_destroy(si->scsi_pkt_pool);
kfree(si);
@@ -2166,7 +2168,8 @@ static int __init libfc_init(void)
sizeof(struct fc_fcp_pkt),
0, SLAB_HWCACHE_ALIGN, NULL);
if (scsi_pkt_cachep == NULL) {
- FC_DBG("Unable to allocate SRB cache...module load failed!");
+ printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
+ "module load failed!");
return -ENOMEM;
}
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e0c247724d2b..745fa5555d6a 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -101,14 +101,6 @@
#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
-static int fc_lport_debug;
-
-#define FC_DEBUG_LPORT(fmt...) \
- do { \
- if (fc_lport_debug) \
- FC_DBG(fmt); \
- } while (0)
-
static void fc_lport_error(struct fc_lport *, struct fc_frame *);
static void fc_lport_enter_reset(struct fc_lport *);
@@ -151,8 +143,8 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
struct fc_rport *rport,
enum fc_rport_event event)
{
- FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
- rport->port_id);
+ FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
+ rport->port_id);
switch (event) {
case RPORT_EV_CREATED:
@@ -162,19 +154,19 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
lport->dns_rp = rport;
fc_lport_enter_rpn_id(lport);
} else {
- FC_DEBUG_LPORT("Received an CREATED event on "
- "port (%6x) for the directory "
- "server, but the lport is not "
- "in the DNS state, it's in the "
- "%d state", rport->port_id,
- lport->state);
+ FC_LPORT_DBG(lport, "Received an CREATED event "
+ "on port (%6x) for the directory "
+ "server, but the lport is not "
+ "in the DNS state, it's in the "
+ "%d state", rport->port_id,
+ lport->state);
lport->tt.rport_logoff(rport);
}
mutex_unlock(&lport->lp_mutex);
} else
- FC_DEBUG_LPORT("Received an event for port (%6x) "
- "which is not the directory server\n",
- rport->port_id);
+ FC_LPORT_DBG(lport, "Received an event for port (%6x) "
+ "which is not the directory server\n",
+ rport->port_id);
break;
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
@@ -185,9 +177,9 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
mutex_unlock(&lport->lp_mutex);
} else
- FC_DEBUG_LPORT("Received an event for port (%6x) "
- "which is not the directory server\n",
- rport->port_id);
+ FC_LPORT_DBG(lport, "Received an event for port (%6x) "
+ "which is not the directory server\n",
+ rport->port_id);
break;
case RPORT_EV_NONE:
break;
@@ -363,8 +355,8 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
fc_frame_free(fp);
@@ -389,8 +381,8 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
void *dp;
u32 f_ctl;
- FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
len = fr_len(in_fp) - sizeof(struct fc_frame_header);
pp = fc_frame_payload_get(in_fp, len);
@@ -437,8 +429,8 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
size_t len;
u32 f_ctl;
- FC_DEBUG_LPORT("Received RNID request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
+ fc_lport_state(lport));
req = fc_frame_payload_get(in_fp, sizeof(*req));
if (!req) {
@@ -498,8 +490,8 @@ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
size_t len;
u32 f_ctl;
- FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
+ fc_lport_state(lport));
req = fc_frame_payload_get(in_fp, sizeof(*req));
if (!req) {
@@ -574,8 +566,8 @@ EXPORT_SYMBOL(fc_fabric_login);
*/
void fc_linkup(struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Link is up for port (%6x)\n",
- fc_host_port_id(lport->host));
+ printk(KERN_INFO "libfc: Link up on port (%6x)\n",
+ fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
if (!lport->link_up) {
@@ -595,8 +587,8 @@ EXPORT_SYMBOL(fc_linkup);
void fc_linkdown(struct fc_lport *lport)
{
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Link is down for port (%6x)\n",
- fc_host_port_id(lport->host));
+ printk(KERN_INFO "libfc: Link down on port (%6x)\n",
+ fc_host_port_id(lport->host));
if (lport->link_up) {
lport->link_up = 0;
@@ -701,12 +693,11 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
{
switch (event) {
case DISC_EV_SUCCESS:
- FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_LPORT_DBG(lport, "Discovery succeeded\n");
break;
case DISC_EV_FAILED:
- FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
- fc_host_port_id(lport->host));
+ printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
+ fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
fc_lport_enter_reset(lport);
mutex_unlock(&lport->lp_mutex);
@@ -726,8 +717,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered READY from state %s\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_READY);
@@ -762,8 +753,8 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
u32 local_fid;
u32 f_ctl;
- FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
+ fc_lport_state(lport));
fh = fc_frame_header_get(rx_fp);
remote_fid = ntoh24(fh->fh_s_id);
@@ -772,12 +763,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
goto out;
remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
if (remote_wwpn == lport->wwpn) {
- FC_DBG("FLOGI from port with same WWPN %llx "
- "possible configuration error\n",
- (unsigned long long)remote_wwpn);
+ printk(KERN_WARNING "libfc: Received FLOGI from port "
+ "with same WWPN %llx\n", remote_wwpn);
goto out;
}
- FC_DBG("FLOGI from port WWPN %llx\n", (unsigned long long)remote_wwpn);
+ FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
/*
* XXX what is the right thing to do for FIDs?
@@ -909,7 +899,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
}
}
} else {
- FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
+ FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
+ fr_eof(fp));
fc_frame_free(fp);
}
mutex_unlock(&lport->lp_mutex);
@@ -947,8 +938,8 @@ EXPORT_SYMBOL(fc_lport_reset);
*/
static void fc_lport_enter_reset(struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_RESET);
@@ -982,9 +973,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
unsigned long delay = 0;
- FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
- PTR_ERR(fp), fc_lport_state(lport),
- lport->retry_count);
+ FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
+ PTR_ERR(fp), fc_lport_state(lport),
+ lport->retry_count);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
/*
@@ -1040,11 +1031,11 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a RFT_ID response\n");
+ FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
if (lport->state != LPORT_ST_RFT_ID) {
- FC_DBG("Received a RFT_ID response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1094,11 +1085,11 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a RPN_ID response\n");
+ FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
if (lport->state != LPORT_ST_RPN_ID) {
- FC_DBG("Received a RPN_ID response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1146,11 +1137,11 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a SCR response\n");
+ FC_LPORT_DBG(lport, "Received a SCR response\n");
if (lport->state != LPORT_ST_SCR) {
- FC_DBG("Received a SCR response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a SCR response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1184,8 +1175,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
{
struct fc_frame *fp;
- FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_SCR);
@@ -1213,8 +1204,8 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
struct fc_ns_fts *lps;
int i;
- FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
@@ -1253,8 +1244,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
{
struct fc_frame *fp;
- FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
@@ -1294,8 +1285,8 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
dp.lp = lport;
- FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_DNS);
@@ -1374,11 +1365,11 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a LOGO response\n");
+ FC_LPORT_DBG(lport, "Received a LOGO response\n");
if (lport->state != LPORT_ST_LOGO) {
- FC_DBG("Received a LOGO response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1413,8 +1404,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
struct fc_frame *fp;
struct fc_els_logo *logo;
- FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_LOGO);
@@ -1456,11 +1447,11 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a FLOGI response\n");
+ FC_LPORT_DBG(lport, "Received a FLOGI response\n");
if (lport->state != LPORT_ST_FLOGI) {
- FC_DBG("Received a FLOGI response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1475,7 +1466,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
did = ntoh24(fh->fh_d_id);
if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
- FC_DEBUG_LPORT("Assigned fid %x\n", did);
+ printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
+ did);
fc_host_port_id(lport->host) = did;
flp = fc_frame_payload_get(fp, sizeof(*flp));
@@ -1494,7 +1486,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
lport->r_a_tov = 2 * e_d_tov;
- FC_DBG("Point-to-Point mode\n");
+ printk(KERN_INFO "libfc: Port (%6x) entered "
+ "point to point mode\n", did);
fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
get_unaligned_be64(
&flp->fl_wwpn),
@@ -1517,7 +1510,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
}
}
} else {
- FC_DBG("bad FLOGI response\n");
+ FC_LPORT_DBG(lport, "Bad FLOGI response\n");
}
out:
@@ -1537,7 +1530,8 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
- FC_DEBUG_LPORT("Processing FLOGI state\n");
+ FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_FLOGI);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 7bfbff7e0efb..7162385f52eb 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -55,14 +55,6 @@
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
-static int fc_rport_debug;
-
-#define FC_DEBUG_RPORT(fmt...) \
- do { \
- if (fc_rport_debug) \
- FC_DBG(fmt); \
- } while (0)
-
struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_plogi(struct fc_rport *);
@@ -97,7 +89,7 @@ static const char *fc_rport_state_names[] = {
static void fc_rport_rogue_destroy(struct device *dev)
{
struct fc_rport *rport = dev_to_rport(dev);
- FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Destroying rogue rport\n");
kfree(rport);
}
@@ -263,8 +255,8 @@ static void fc_rport_work(struct work_struct *work)
fc_rport_state_enter(new_rport, RPORT_ST_READY);
} else {
- FC_DBG("Failed to create the rport for port "
- "(%6x).\n", ids.port_id);
+ printk(KERN_WARNING "libfc: Failed to allocate "
+ "memory for rport (%6x)\n", ids.port_id);
event = RPORT_EV_FAILED;
}
if (rport->port_id != FC_FID_DIR_SERV)
@@ -309,7 +301,7 @@ int fc_rport_login(struct fc_rport *rport)
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Login to port\n");
fc_rport_enter_plogi(rport);
@@ -329,16 +321,13 @@ int fc_rport_login(struct fc_rport *rport)
int fc_rport_logoff(struct fc_rport *rport)
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
- struct fc_lport *lport = rdata->local_port;
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Remove port\n");
if (rdata->rp_state == RPORT_ST_NONE) {
- FC_DEBUG_RPORT("(%6x): Port (%6x) in NONE state,"
- " not removing", fc_host_port_id(lport->host),
- rport->port_id);
+ FC_RPORT_DBG(rport, "Port in NONE state, not removing\n");
mutex_unlock(&rdata->rp_mutex);
goto out;
}
@@ -379,7 +368,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport)
fc_rport_state_enter(rport, RPORT_ST_READY);
- FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Port is Ready\n");
rdata->event = RPORT_EV_CREATED;
queue_work(rport_event_queue, &rdata->event_work);
@@ -436,8 +425,8 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
- FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
- PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+ FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
+ PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_PLOGI:
@@ -479,8 +468,8 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
return fc_rport_error(rport, fp);
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
- FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
- PTR_ERR(fp), fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
+ PTR_ERR(fp), fc_rport_state(rport));
rdata->retries++;
/* no additional delay on exchange timeouts */
if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
@@ -517,12 +506,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a PLOGI response\n");
if (rdata->rp_state != RPORT_ST_PLOGI) {
- FC_DBG("Received a PLOGI response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -583,8 +571,8 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
- FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_PLOGI);
@@ -628,12 +616,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a PRLI response\n");
if (rdata->rp_state != RPORT_ST_PRLI) {
- FC_DBG("Received a PRLI response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -663,7 +650,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_enter_rtv(rport);
} else {
- FC_DBG("Bad ELS response\n");
+ FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
rdata->event = RPORT_EV_FAILED;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue, &rdata->event_work);
@@ -695,12 +682,11 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a LOGO response\n");
if (rdata->rp_state != RPORT_ST_LOGO) {
- FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -715,7 +701,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
if (op == ELS_LS_ACC) {
fc_rport_enter_rtv(rport);
} else {
- FC_DBG("Bad ELS response\n");
+ FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
rdata->event = RPORT_EV_LOGO;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue, &rdata->event_work);
@@ -745,8 +731,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
} *pp;
struct fc_frame *fp;
- FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_PRLI);
@@ -784,12 +770,11 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a RTV response\n");
if (rdata->rp_state != RPORT_ST_RTV) {
- FC_DBG("Received a RTV response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a RTV response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -844,8 +829,8 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_lport *lport = rdata->local_port;
- FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_RTV);
@@ -875,8 +860,8 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
- FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_LOGO);
@@ -983,14 +968,13 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
fh = fc_frame_header_get(fp);
- FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
+ fc_rport_state(rport));
sid = ntoh24(fh->fh_s_id);
pl = fc_frame_payload_get(fp, sizeof(*pl));
if (!pl) {
- FC_DBG("incoming PLOGI from %x too short\n", sid);
+ FC_RPORT_DBG(rport, "Received PLOGI too short\n");
WARN_ON(1);
/* XXX TBD: send reject? */
fc_frame_free(fp);
@@ -1012,26 +996,26 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
*/
switch (rdata->rp_state) {
case RPORT_ST_INIT:
- FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
- "- reject\n", sid, (unsigned long long)wwpn);
+ FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
+ "- reject\n", (unsigned long long)wwpn);
reject = ELS_RJT_UNSUP;
break;
case RPORT_ST_PLOGI:
- FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
- sid, rdata->rp_state);
+ FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
+ rdata->rp_state);
if (wwpn < lport->wwpn)
reject = ELS_RJT_INPROG;
break;
case RPORT_ST_PRLI:
case RPORT_ST_READY:
- FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
- "- ignored for now\n", sid, rdata->rp_state);
+ FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
+ "- ignored for now\n", rdata->rp_state);
/* XXX TBD - should reset */
break;
case RPORT_ST_NONE:
default:
- FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
- "state %d\n", sid, rdata->rp_state);
+ FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
+ "state %d\n", rdata->rp_state);
fc_frame_free(fp);
return;
break;
@@ -1115,9 +1099,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
fh = fc_frame_header_get(rx_fp);
- FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
+ fc_rport_state(rport));
switch (rdata->rp_state) {
case RPORT_ST_PRLI:
@@ -1252,9 +1235,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
fh = fc_frame_header_get(fp);
- FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
+ fc_rport_state(rport));
if (rdata->rp_state == RPORT_ST_NONE) {
fc_frame_free(fp);
@@ -1286,9 +1268,8 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
fh = fc_frame_header_get(fp);
- FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
+ fc_rport_state(rport));
if (rdata->rp_state == RPORT_ST_NONE) {
fc_frame_free(fp);
@@ -1308,7 +1289,6 @@ static void fc_rport_flush_queue(void)
flush_workqueue(rport_event_queue);
}
-
int fc_rport_init(struct fc_lport *lport)
{
if (!lport->tt.rport_create)
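The two libfc files above replace the file-local FC_DEBUG_LPORT()/FC_DEBUG_RPORT() wrappers (and bare FC_DBG() calls) with FC_LPORT_DBG()/FC_RPORT_DBG(), which take the port object and print its identity themselves, so call sites no longer pass port_id by hand and every message from one port gets a consistent, greppable prefix. A minimal userspace sketch of that per-object debug-macro pattern; the struct, macro name and prefix below are invented for illustration, not the libfc definitions:

#include <stdio.h>

/* Hypothetical stand-in for the libfc remote port structure. */
struct rport {
        unsigned int port_id;
};

/*
 * Per-object debug macro: the identifying field is printed by the macro
 * itself, so call sites only supply the message.  ##__VA_ARGS__ is the
 * GNU extension the kernel also relies on for empty argument lists.
 */
#define RPORT_DBG(rp, fmt, ...) \
        printf("rport (%6x): " fmt, (rp)->port_id, ##__VA_ARGS__)

int main(void)
{
        struct rport rp = { .port_id = 0xfffffc };

        /* No need to repeat rp.port_id in every call. */
        RPORT_DBG(&rp, "Received a PLOGI response, but in state %s\n",
                  "Ready");
        return 0;
}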
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 59908aead531..716cc344c5df 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,15 +38,30 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
-static int iscsi_dbg_lib;
-module_param_named(debug_libiscsi, iscsi_dbg_lib, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. "
- "Set to 1 to turn on, and zero to turn off. Default "
- "is off.");
+static int iscsi_dbg_lib_conn;
+module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_conn,
+ "Turn on debugging for connections in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_session;
+module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_session,
+ "Turn on debugging for sessions in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_eh;
+module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_eh,
+ "Turn on debugging for error handling in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
do { \
- if (iscsi_dbg_lib) \
+ if (iscsi_dbg_lib_conn) \
iscsi_conn_printk(KERN_INFO, _conn, \
"%s " dbg_fmt, \
__func__, ##arg); \
@@ -54,7 +69,15 @@ MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. "
#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
do { \
- if (iscsi_dbg_lib) \
+ if (iscsi_dbg_lib_session) \
+ iscsi_session_printk(KERN_INFO, _session, \
+ "%s " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
+ do { \
+ if (iscsi_dbg_lib_eh) \
iscsi_session_printk(KERN_INFO, _session, \
"%s " dbg_fmt, \
__func__, ##arg); \
@@ -954,6 +977,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task = iscsi_itt_to_ctask(conn, hdr->itt);
if (!task)
return ISCSI_ERR_BAD_ITT;
+ task->last_xfer = jiffies;
break;
case ISCSI_OP_R2T:
/*
@@ -1192,10 +1216,12 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
spin_unlock_bh(&conn->session->lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->lock);
- __iscsi_put_task(task);
- if (!rc)
+ if (!rc) {
/* done with this task */
+ task->last_xfer = jiffies;
conn->task = NULL;
+ }
+ __iscsi_put_task(task);
return rc;
}
@@ -1361,6 +1387,9 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
task->state = ISCSI_TASK_PENDING;
task->conn = conn;
task->sc = sc;
+ task->have_checked_conn = false;
+ task->last_timeout = jiffies;
+ task->last_xfer = jiffies;
INIT_LIST_HEAD(&task->running);
return task;
}
@@ -1555,10 +1584,10 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
- iscsi_session_printk(KERN_INFO, session,
- "failing target reset: Could not log "
- "back into target [age %d]\n",
- session->age);
+ ISCSI_DBG_EH(session,
+ "failing target reset: Could not log back into "
+ "target [age %d]\n",
+ session->age);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
return FAILED;
@@ -1572,7 +1601,7 @@ failed:
*/
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- ISCSI_DBG_SESSION(session, "wait for relogin\n");
+ ISCSI_DBG_EH(session, "wait for relogin\n");
wait_event_interruptible(conn->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1582,10 +1611,10 @@ failed:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
- if (session->state == ISCSI_STATE_LOGGED_IN)
- iscsi_session_printk(KERN_INFO, session,
- "target reset succeeded\n");
- else
+ if (session->state == ISCSI_STATE_LOGGED_IN) {
+ ISCSI_DBG_EH(session,
+ "target reset succeeded\n");
+ } else
goto failed;
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -1601,7 +1630,7 @@ static void iscsi_tmf_timedout(unsigned long data)
spin_lock(&session->lock);
if (conn->tmf_state == TMF_QUEUED) {
conn->tmf_state = TMF_TIMEDOUT;
- ISCSI_DBG_SESSION(session, "tmf timedout\n");
+ ISCSI_DBG_EH(session, "tmf timedout\n");
/* unblock eh_abort() */
wake_up(&conn->ehwait);
}
@@ -1621,7 +1650,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
spin_lock_bh(&session->lock);
- ISCSI_DBG_SESSION(session, "tmf exec failure\n");
+ ISCSI_DBG_EH(session, "tmf exec failure\n");
return -EPERM;
}
conn->tmfcmd_pdus_cnt++;
@@ -1629,7 +1658,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
conn->tmf_timer.function = iscsi_tmf_timedout;
conn->tmf_timer.data = (unsigned long)conn;
add_timer(&conn->tmf_timer);
- ISCSI_DBG_SESSION(session, "tmf set timeout\n");
+ ISCSI_DBG_EH(session, "tmf set timeout\n");
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -1716,17 +1745,18 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
return 0;
}
-static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
+ enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ struct iscsi_task *task = NULL;
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
- enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
- cls_session = starget_to_session(scsi_target(scmd->device));
+ cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- ISCSI_DBG_SESSION(session, "scsi cmd %p timedout\n", scmd);
+ ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
spin_lock(&session->lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1745,6 +1775,26 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
goto done;
}
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ if (!task)
+ goto done;
+ /*
+ * If we have sent (at least queued to the network layer) a pdu or
+ * recvd one for the task since the last timeout ask for
+ * more time. If on the next timeout we have not made progress
+ * we can check if it is the task or connection when we send the
+ * nop as a ping.
+ */
+ if (time_after_eq(task->last_xfer, task->last_timeout)) {
+ ISCSI_DBG_EH(session, "Command making progress. Asking "
+ "scsi-ml for more time to complete. "
+ "Last data recv at %lu. Last timeout was at "
+ "%lu.\n", task->last_xfer, task->last_timeout);
+ task->have_checked_conn = false;
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
if (!conn->recv_timeout && !conn->ping_timeout)
goto done;
/*
@@ -1755,23 +1805,32 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
rc = BLK_EH_RESET_TIMER;
goto done;
}
+
+ /* Assumes nop timeout is shorter than scsi cmd timeout */
+ if (task->have_checked_conn)
+ goto done;
+
/*
- * if we are about to check the transport then give the command
- * more time
+ * A nop is already outstanding, either a periodic transport check
+ * or one sent by an earlier command timeout, so wait for it
*/
- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
- jiffies)) {
+ if (conn->ping_task) {
+ task->have_checked_conn = true;
rc = BLK_EH_RESET_TIMER;
goto done;
}
- /* if in the middle of checking the transport then give us more time */
- if (conn->ping_task)
- rc = BLK_EH_RESET_TIMER;
+ /* Make sure there is a transport check done */
+ iscsi_send_nopout(conn, NULL);
+ task->have_checked_conn = true;
+ rc = BLK_EH_RESET_TIMER;
+
done:
+ if (task)
+ task->last_timeout = jiffies;
spin_unlock(&session->lock);
- ISCSI_DBG_SESSION(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+ "timer reset" : "nh");
return rc;
}
@@ -1841,7 +1900,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
+ ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
@@ -1850,8 +1909,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
* got the command.
*/
if (!sc->SCp.ptr) {
- ISCSI_DBG_SESSION(session, "sc never reached iscsi layer or "
- "it completed.\n");
+ ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
+ "it completed.\n");
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
@@ -1865,7 +1924,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
sc->SCp.phase != session->age) {
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
- ISCSI_DBG_SESSION(session, "failing abort due to dropped "
+ ISCSI_DBG_EH(session, "failing abort due to dropped "
"session.\n");
return FAILED;
}
@@ -1875,13 +1934,12 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
age = session->age;
task = (struct iscsi_task *)sc->SCp.ptr;
- ISCSI_DBG_SESSION(session, "aborting [sc %p itt 0x%x]\n",
- sc, task->itt);
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n",
+ sc, task->itt);
/* task completed before time out */
if (!task->sc) {
- ISCSI_DBG_SESSION(session, "sc completed while abort in "
- "progress\n");
+ ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
goto success;
}
@@ -1930,8 +1988,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
if (!sc->SCp.ptr) {
conn->tmf_state = TMF_INITIAL;
/* task completed before tmf abort response */
- ISCSI_DBG_SESSION(session, "sc completed while abort "
- "in progress\n");
+ ISCSI_DBG_EH(session, "sc completed while abort in "
+ "progress\n");
goto success;
}
/* fall through */
@@ -1943,16 +2001,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
success:
spin_unlock_bh(&session->lock);
success_unlocked:
- ISCSI_DBG_SESSION(session, "abort success [sc %p itt 0x%x]\n",
- sc, task->itt);
+ ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
+ sc, task->itt);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
failed:
spin_unlock_bh(&session->lock);
failed_unlocked:
- ISCSI_DBG_SESSION(session, "abort failed [sc %p itt 0x%x]\n", sc,
- task ? task->itt : 0);
+ ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
+ task ? task->itt : 0);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
@@ -1979,8 +2037,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- ISCSI_DBG_SESSION(session, "LU Reset [sc %p lun %u]\n",
- sc, sc->device->lun);
+ ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
@@ -2034,8 +2091,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
unlock:
spin_unlock_bh(&session->lock);
done:
- ISCSI_DBG_SESSION(session, "dev reset result = %s\n",
- rc == SUCCESS ? "SUCCESS" : "FAILED");
+ ISCSI_DBG_EH(session, "dev reset result = %s\n",
+ rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&session->eh_mutex);
return rc;
}
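The libiscsi hunks above add per-task last_xfer/last_timeout stamps and a have_checked_conn flag so the SCSI command timeout handler can tell a stalled connection from a slow but progressing task: progress since the last timeout buys more time, otherwise one NOP-Out is sent and the command is only failed if the next expiry still shows no progress. A simplified userspace model of that decision, ignoring locking, the recv/ping timeout checks and the existing ping_task case; all names below are local to the sketch:

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the per-task bookkeeping added in the patch. */
struct task_state {
        unsigned long last_xfer;     /* last time data moved for this task */
        unsigned long last_timeout;  /* last time the timeout handler ran */
        bool have_checked_conn;      /* a NOP-Out was already sent this cycle */
};

/*
 * Sketch of the choice made in iscsi_eh_cmd_timed_out(): if the task
 * transferred data since the previous timeout, ask for more time;
 * otherwise probe the connection once, then give up on the next expiry.
 * Returning true stands in for BLK_EH_RESET_TIMER.
 */
static bool cmd_timed_out(struct task_state *t, unsigned long now)
{
        bool reset = false;

        if ((long)(t->last_xfer - t->last_timeout) >= 0) {
                /* progress since the last timeout: re-arm the timer */
                t->have_checked_conn = false;
                reset = true;
        } else if (!t->have_checked_conn) {
                /* no progress: send one NOP-Out ping, wait one more cycle */
                t->have_checked_conn = true;
                reset = true;
        }
        t->last_timeout = now;
        return reset;
}

int main(void)
{
        struct task_state t = { .last_xfer = 100, .last_timeout = 50 };

        printf("first expiry:  %s\n", cmd_timed_out(&t, 200) ? "reset" : "fail");
        printf("second expiry: %s\n", cmd_timed_out(&t, 300) ? "reset" : "fail");
        printf("third expiry:  %s\n", cmd_timed_out(&t, 400) ? "reset" : "fail");
        return 0;
}

On the third expiry with no progress the sketch falls through to the failure path, which matches the intent described in the comment added to iscsi_eh_cmd_timed_out() above.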
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2bc07090321d..2e0746d70303 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -686,6 +686,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
"offset=%d, datalen=%d)\n",
tcp_task->data_offset,
tcp_conn->in.datalen);
+ task->last_xfer = jiffies;
rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
sdb->table.sgl,
sdb->table.nents,
@@ -713,9 +714,10 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
rc = ISCSI_ERR_BAD_ITT;
else if (ahslen)
rc = ISCSI_ERR_AHSLEN;
- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+ else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+ task->last_xfer = jiffies;
rc = iscsi_tcp_r2t_rsp(conn, task);
- else
+ } else
rc = ISCSI_ERR_PROTO;
spin_unlock(&conn->session->lock);
break;
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index b12ad7c7c673..18735b39b3d3 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -75,8 +75,9 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *
int i;
printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd);
for (i = 0; i < cmd->cmd_len; ++i)
- printk(" %.2x", cmd->cmnd[i]);
- printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
+ printk(KERN_CONT " %.2x", cmd->cmnd[i]);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
}
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 4a990f4da4ea..cca8e4ab0372 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -216,7 +216,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
- uint16_t ram_words, void **nxt)
+ uint32_t ram_words, void **nxt)
{
int rval;
uint32_t cnt, stat, timer, words, idx;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 262026129325..f2ce8e3cc91b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2301,7 +2301,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
char *link_speed;
int rval;
- uint16_t mb[6];
+ uint16_t mb[4];
struct qla_hw_data *ha = vha->hw;
if (!IS_IIDMA_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 451ece0760b0..fe69f3057671 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1267,17 +1267,22 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (IS_FWI2_CAPABLE(vha->hw))
+ mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ else
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
/* Return firmware states. */
states[0] = mcp->mb[1];
- states[1] = mcp->mb[2];
- states[2] = mcp->mb[3];
- states[3] = mcp->mb[4];
- states[4] = mcp->mb[5];
+ if (IS_FWI2_CAPABLE(vha->hw)) {
+ states[1] = mcp->mb[2];
+ states[2] = mcp->mb[3];
+ states[3] = mcp->mb[4];
+ states[4] = mcp->mb[5];
+ }
if (rval != QLA_SUCCESS) {
/*EMPTY*/
@@ -2697,10 +2702,13 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
- mcp->mb[4] = mcp->mb[5] = 0;
- mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+ if (IS_QLA81XX(vha->hw))
+ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+ else
+ mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -2710,8 +2718,6 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mb[0] = mcp->mb[0];
mb[1] = mcp->mb[1];
mb[3] = mcp->mb[3];
- mb[4] = mcp->mb[4];
- mb[5] = mcp->mb[5];
}
if (rval != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index dcf011679c8b..f0396e79b6fa 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1663,7 +1663,7 @@ skip_pio:
/* queue 0 uses two msix vectors */
if (ql2xmultique_tag) {
cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
(cpus + 1) : (ha->msix_count - 1);
ha->max_req_queues = 2;
} else if (ql2xmaxqueues > 1) {
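The qla_os.c change swaps a truth test for a comparison when sizing response queues. The old expression (msix_count - 1 - cpus) is only false when the two values are exactly equal, so a host with fewer MSI-X vectors than CPUs still took the cpus + 1 branch. A small standalone illustration; the 4-vector/8-CPU figures are made up for the example:

#include <stdio.h>

static int old_max_rsp_queues(int msix_count, int cpus)
{
        /* non-zero difference, even a negative one, picks cpus + 1 */
        return (msix_count - 1 - cpus) ? (cpus + 1) : (msix_count - 1);
}

static int new_max_rsp_queues(int msix_count, int cpus)
{
        /* only more vectors than CPUs picks cpus + 1 */
        return (msix_count - 1 > cpus) ? (cpus + 1) : (msix_count - 1);
}

int main(void)
{
        /* 4 MSI-X vectors, 8 online CPUs */
        printf("old: %d response queues\n", old_max_rsp_queues(4, 8));
        printf("new: %d response queues\n", new_max_rsp_queues(4, 8));
        return 0;
}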
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b63feaf43126..84369705a9ad 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k3"
+#define QLA2XXX_VERSION "8.03.01-k4"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 41a21772df12..fb9af207d61d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -101,6 +101,8 @@ static const char * scsi_debug_version_date = "20070104";
#define DEF_DIF 0
#define DEF_GUARD 0
#define DEF_ATO 1
+#define DEF_PHYSBLK_EXP 0
+#define DEF_LOWEST_ALIGNED 0
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE 1
@@ -156,6 +158,8 @@ static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dif = DEF_DIF;
static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_ato = DEF_ATO;
+static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
+static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_cmnd_count = 0;
@@ -657,7 +661,12 @@ static unsigned char vpdb0_data[] = {
static int inquiry_evpd_b0(unsigned char * arr)
{
+ unsigned int gran;
+
memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
+ gran = 1 << scsi_debug_physblk_exp;
+ arr[2] = (gran >> 8) & 0xff;
+ arr[3] = gran & 0xff;
if (sdebug_store_sectors > 0x400) {
arr[4] = (sdebug_store_sectors >> 24) & 0xff;
arr[5] = (sdebug_store_sectors >> 16) & 0xff;
@@ -945,6 +954,9 @@ static int resp_readcap16(struct scsi_cmnd * scp,
arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
arr[11] = scsi_debug_sector_size & 0xff;
+ arr[13] = scsi_debug_physblk_exp & 0xf;
+ arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
+ arr[15] = scsi_debug_lowest_aligned & 0xff;
if (scsi_debug_dif) {
arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
@@ -2380,6 +2392,8 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
+module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
+module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2401,7 +2415,9 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
-MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
+MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
+MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
@@ -2874,6 +2890,18 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (scsi_debug_physblk_exp > 15) {
+ printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
+ scsi_debug_physblk_exp);
+ return -EINVAL;
+ }
+
+ if (scsi_debug_lowest_aligned > 0x3fff) {
+ printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
+ scsi_debug_lowest_aligned);
+ return -EINVAL;
+ }
+
if (scsi_debug_dev_size_mb < 1)
scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
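The scsi_debug additions expose physblk_exp and lowest_aligned and report them through the Block Limits VPD page and bytes 13-15 of the READ CAPACITY(16) response, as the resp_readcap16() hunk shows. A short sketch of that byte layout and how an initiator would recover the values; the encode_rc16() helper is invented for illustration, while the bit positions follow the hunk above:

#include <stdint.h>
#include <stdio.h>

/*
 * READ CAPACITY(16) packs the physical-block exponent into the low
 * nibble of byte 13 and the lowest aligned LBA into the low 14 bits
 * of bytes 14-15.
 */
static void encode_rc16(uint8_t *arr, unsigned int physblk_exp,
                        unsigned int lowest_aligned)
{
        arr[13] = physblk_exp & 0xf;
        arr[14] = (lowest_aligned >> 8) & 0x3f;
        arr[15] = lowest_aligned & 0xff;
}

int main(void)
{
        uint8_t arr[32] = { 0 };
        unsigned int logical = 512;
        unsigned int pbs, lalba;

        /* e.g. 4 KiB physical blocks built from 512-byte logical blocks */
        encode_rc16(arr, 3, 7);

        pbs = logical << (arr[13] & 0xf);
        lalba = ((arr[14] & 0x3f) << 8) | arr[15];

        printf("physical block size: %u bytes\n", pbs);
        printf("lowest aligned lba:  %u\n", lalba);
        return 0;
}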
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 8821df9a277b..93c2622cb969 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -24,6 +24,13 @@ struct scsi_dev_info_list {
unsigned compatible; /* for use with scsi_static_device_list entries */
};
+struct scsi_dev_info_list_table {
+ struct list_head node; /* our node for being on the master list */
+ struct list_head scsi_dev_info_list; /* head of dev info list */
+ const char *name; /* name of list for /proc (NULL for global) */
+ int key; /* unique numeric identifier */
+};
+
static const char spaces[] = " "; /* 16 of them */
static unsigned scsi_default_dev_flags;
@@ -247,6 +254,22 @@ static struct {
{ NULL, NULL, NULL, 0 },
};
+static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key)
+{
+ struct scsi_dev_info_list_table *devinfo_table;
+ int found = 0;
+
+ list_for_each_entry(devinfo_table, &scsi_dev_info_list, node)
+ if (devinfo_table->key == key) {
+ found = 1;
+ break;
+ }
+ if (!found)
+ return ERR_PTR(-EINVAL);
+
+ return devinfo_table;
+}
+
/*
* scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into
* devinfo vendor and model strings.
@@ -296,7 +319,38 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
char *strflags, int flags)
{
+ return scsi_dev_info_list_add_keyed(compatible, vendor, model,
+ strflags, flags,
+ SCSI_DEVINFO_GLOBAL);
+}
+
+/**
+ * scsi_dev_info_list_add_keyed - add one dev_info list entry.
+ * @compatible: if true, null terminate short strings. Otherwise space pad.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @strflags: integer string
+ * @flags: if strflags NULL, use this flag value
+ * @key: specify list to use
+ *
+ * Description:
+ * Create and add one dev_info entry for @vendor, @model,
+ * @strflags or @flag in list specified by @key. If @compatible,
+ * add to the tail of the list, do not space pad, and set
+ * devinfo->compatible. The scsi_static_device_list entries are
+ * added with @compatible 1 and @strflags NULL.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
+ char *strflags, int flags, int key)
+{
struct scsi_dev_info_list *devinfo;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
if (!devinfo) {
@@ -317,12 +371,15 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
devinfo->compatible = compatible;
if (compatible)
- list_add_tail(&devinfo->dev_info_list, &scsi_dev_info_list);
+ list_add_tail(&devinfo->dev_info_list,
+ &devinfo_table->scsi_dev_info_list);
else
- list_add(&devinfo->dev_info_list, &scsi_dev_info_list);
+ list_add(&devinfo->dev_info_list,
+ &devinfo_table->scsi_dev_info_list);
return 0;
}
+EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
@@ -382,22 +439,48 @@ static int scsi_dev_info_list_add_str(char *dev_list)
* @model: model name
*
* Description:
- * Search the scsi_dev_info_list for an entry matching @vendor and
- * @model, if found, return the matching flags value, else return
- * the host or global default settings. Called during scan time.
+ * Search the global scsi_dev_info_list (specified by list zero)
+ * for an entry matching @vendor and @model, if found, return the
+ * matching flags value, else return the host or global default
+ * settings. Called during scan time.
**/
int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model)
{
+ return scsi_get_device_flags_keyed(sdev, vendor, model,
+ SCSI_DEVINFO_GLOBAL);
+}
+
+
+/**
+ * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list.
+ * @sdev: &scsi_device to get flags for
+ * @vendor: vendor name
+ * @model: model name
+ * @key: list to look up
+ *
+ * Description:
+ * Search the scsi_dev_info_list specified by @key for an entry
+ * matching @vendor and @model, if found, return the matching
+ * flags value, else return the host or global default settings.
+ * Called during scan time.
+ **/
+int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model,
+ int key)
+{
struct scsi_dev_info_list *devinfo;
- unsigned int bflags;
+ struct scsi_dev_info_list_table *devinfo_table;
+
+ devinfo_table = scsi_devinfo_lookup_by_key(key);
- bflags = sdev->sdev_bflags;
- if (!bflags)
- bflags = scsi_default_dev_flags;
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
- list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) {
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
if (devinfo->compatible) {
/*
* Behave like the older version of get_device_flags.
@@ -447,32 +530,89 @@ int scsi_get_device_flags(struct scsi_device *sdev,
return devinfo->flags;
}
}
- return bflags;
+ /* nothing found, return nothing */
+ if (key != SCSI_DEVINFO_GLOBAL)
+ return 0;
+
+ /* except for the global list, where we have an exception */
+ if (sdev->sdev_bflags)
+ return sdev->sdev_bflags;
+
+ return scsi_default_dev_flags;
}
+EXPORT_SYMBOL(scsi_get_device_flags_keyed);
#ifdef CONFIG_SCSI_PROC_FS
+struct double_list {
+ struct list_head *top;
+ struct list_head *bottom;
+};
+
static int devinfo_seq_show(struct seq_file *m, void *v)
{
+ struct double_list *dl = v;
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table, node);
struct scsi_dev_info_list *devinfo =
- list_entry(v, struct scsi_dev_info_list, dev_info_list);
+ list_entry(dl->bottom, struct scsi_dev_info_list,
+ dev_info_list);
+
+ if (devinfo_table->scsi_dev_info_list.next == dl->bottom &&
+ devinfo_table->name)
+ seq_printf(m, "[%s]:\n", devinfo_table->name);
seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
- devinfo->vendor, devinfo->model, devinfo->flags);
+ devinfo->vendor, devinfo->model, devinfo->flags);
return 0;
}
-static void * devinfo_seq_start(struct seq_file *m, loff_t *pos)
+static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos)
{
- return seq_list_start(&scsi_dev_info_list, *pos);
+ struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL);
+ loff_t pos = *ppos;
+
+ if (!dl)
+ return NULL;
+
+ list_for_each(dl->top, &scsi_dev_info_list) {
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table,
+ node);
+ list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list)
+ if (pos-- == 0)
+ return dl;
+ }
+
+ kfree(dl);
+ return NULL;
}
-static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos)
+static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos)
{
- return seq_list_next(v, &scsi_dev_info_list, pos);
+ struct double_list *dl = v;
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table, node);
+
+ ++*ppos;
+ dl->bottom = dl->bottom->next;
+ while (&devinfo_table->scsi_dev_info_list == dl->bottom) {
+ dl->top = dl->top->next;
+ if (dl->top == &scsi_dev_info_list) {
+ kfree(dl);
+ return NULL;
+ }
+ devinfo_table = list_entry(dl->top,
+ struct scsi_dev_info_list_table,
+ node);
+ dl->bottom = devinfo_table->scsi_dev_info_list.next;
+ }
+
+ return dl;
}
static void devinfo_seq_stop(struct seq_file *m, void *v)
{
+ kfree(v);
}
static const struct seq_operations scsi_devinfo_seq_ops = {
@@ -549,19 +689,78 @@ MODULE_PARM_DESC(default_dev_flags,
**/
void scsi_exit_devinfo(void)
{
- struct list_head *lh, *lh_next;
- struct scsi_dev_info_list *devinfo;
-
#ifdef CONFIG_SCSI_PROC_FS
remove_proc_entry("scsi/device_info", NULL);
#endif
- list_for_each_safe(lh, lh_next, &scsi_dev_info_list) {
+ scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL);
+}
+
+/**
+ * scsi_dev_info_add_list - add a new devinfo list
+ * @key: key of the list to add
+ * @name: Name of the list to add (for /proc/scsi/device_info)
+ *
+ * Adds the requested list, returns zero on success, -EEXIST if the
+ * key is already registered to a list, or other error on failure.
+ */
+int scsi_dev_info_add_list(int key, const char *name)
+{
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (!IS_ERR(devinfo_table))
+ /* list already exists */
+ return -EEXIST;
+
+ devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL);
+
+ if (!devinfo_table)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&devinfo_table->node);
+ INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list);
+ devinfo_table->name = name;
+ devinfo_table->key = key;
+ list_add_tail(&devinfo_table->node, &scsi_dev_info_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_dev_info_add_list);
+
+/**
+ * scsi_dev_info_remove_list - destroy an added devinfo list
+ * @key: key of the list to destroy
+ *
+ * Iterates over the entire list first, freeing all the values, then
+ * frees the list itself. Returns 0 on success or -EINVAL if the key
+ * can't be found.
+ */
+int scsi_dev_info_remove_list(int key)
+{
+ struct list_head *lh, *lh_next;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ /* no such list */
+ return -EINVAL;
+
+ /* remove from the master list */
+ list_del(&devinfo_table->node);
+
+ list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) {
+ struct scsi_dev_info_list *devinfo;
+
devinfo = list_entry(lh, struct scsi_dev_info_list,
dev_info_list);
kfree(devinfo);
}
+ kfree(devinfo_table);
+
+ return 0;
}
+EXPORT_SYMBOL(scsi_dev_info_remove_list);
/**
* scsi_init_devinfo - set up the dynamic device list.
@@ -577,10 +776,14 @@ int __init scsi_init_devinfo(void)
#endif
int error, i;
- error = scsi_dev_info_list_add_str(scsi_dev_flags);
+ error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL);
if (error)
return error;
+ error = scsi_dev_info_list_add_str(scsi_dev_flags);
+ if (error)
+ goto out;
+
for (i = 0; scsi_static_device_list[i].vendor; i++) {
error = scsi_dev_info_list_add(1 /* compatibile */,
scsi_static_device_list[i].vendor,
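The scsi_devinfo.c rework turns the single global device-info list into keyed lists (SCSI_DEVINFO_GLOBAL, SCSI_DEVINFO_SPI), and only the global key keeps the old sdev_bflags/default fallback; a miss on any other key simply returns 0, as the tail of scsi_get_device_flags_keyed() shows. A compact userspace model of those lookup semantics; the real code does space-padded prefix matching on vendor/model, which strcmp() glosses over here, the function and type names are sketch-local, and the table contents mirror the two SPI blacklist entries added later in this patch:

#include <stdio.h>
#include <string.h>

enum { DEVINFO_GLOBAL = 0, DEVINFO_SPI = 1 };

struct devinfo_entry {
        int key;
        const char *vendor;
        const char *model;
        unsigned int flags;
};

/* Stand-in for the per-key lists built by scsi_dev_info_add_list(). */
static const struct devinfo_entry table[] = {
        { DEVINFO_SPI, "HP",  "Ultrium 3-SCSI", 0x1 /* SPI_BLIST_NOIUS */ },
        { DEVINFO_SPI, "IBM", "ULTRIUM-TD3",    0x1 },
};

/*
 * Lookup sketch: a hit returns the entry's flags, a miss on a
 * non-global key returns 0, and only the global key falls back
 * to a default flag word.
 */
static unsigned int get_flags_keyed(int key, const char *vendor,
                                    const char *model, unsigned int def)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].key == key &&
                    !strcmp(table[i].vendor, vendor) &&
                    !strcmp(table[i].model, model))
                        return table[i].flags;

        return key == DEVINFO_GLOBAL ? def : 0;
}

int main(void)
{
        printf("SPI blacklist flags: 0x%x\n",
               get_flags_keyed(DEVINFO_SPI, "HP", "Ultrium 3-SCSI", 0));
        printf("global fallback:     0x%x\n",
               get_flags_keyed(DEVINFO_GLOBAL, "ACME", "DISK", 0x40));
        return 0;
}

spi_transport_init() in the scsi_transport_spi.c hunks further down registers exactly this kind of keyed list and seeds it from spi_static_device_list[].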
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 30f3275e119e..f3c40898fc7d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1207,6 +1207,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
ret = scsi_setup_blk_pc_cmnd(sdev, req);
return scsi_prep_return(q, req, ret);
}
+EXPORT_SYMBOL(scsi_prep_fn);
/*
* scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index fbc83bebdd8e..021e503c8c44 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -39,9 +39,25 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
#endif
/* scsi_devinfo.c */
+
+/* list of keys for the lists */
+enum {
+ SCSI_DEVINFO_GLOBAL = 0,
+ SCSI_DEVINFO_SPI,
+};
+
extern int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model);
+extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model, int key);
+extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
+ char *model, char *strflags,
+ int flags, int key);
+extern int scsi_dev_info_add_list(int key, const char *name);
+extern int scsi_dev_info_remove_list(int key);
+
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
@@ -71,7 +87,6 @@ extern int scsi_init_queue(void);
extern void scsi_exit_queue(void);
struct request_queue;
struct request;
-extern int scsi_prep_fn(struct request_queue *, struct request *);
extern struct kmem_cache *scsi_sdb_cache;
/* scsi_proc.c */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index fa4711d12744..91482f2dcc50 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -420,29 +420,12 @@ static int scsi_bus_resume(struct device * dev)
return err;
}
-static int scsi_bus_remove(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- struct scsi_device *sdev = to_scsi_device(dev);
- int err = 0;
-
- /* reset the prep_fn back to the default since the
- * driver may have altered it and it's being removed */
- blk_queue_prep_rq(sdev->request_queue, scsi_prep_fn);
-
- if (drv && drv->remove)
- err = drv->remove(dev);
-
- return 0;
-}
-
struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
.uevent = scsi_bus_uevent,
.suspend = scsi_bus_suspend,
.resume = scsi_bus_resume,
- .remove = scsi_bus_remove,
};
EXPORT_SYMBOL_GPL(scsi_bus_type);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3f64d93b6c8b..292c02f810d0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3397,7 +3397,6 @@ fc_destroy_bsgjob(struct fc_bsg_job *job)
kfree(job);
}
-
/**
* fc_bsg_jobdone - completion routine for bsg requests that the LLD has
* completed
@@ -3408,15 +3407,10 @@ fc_bsg_jobdone(struct fc_bsg_job *job)
{
struct request *req = job->req;
struct request *rsp = req->next_rq;
- unsigned long flags;
int err;
- spin_lock_irqsave(&job->job_lock, flags);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
-
err = job->req->errors = job->reply->result;
+
if (err < 0)
/* we're only returning the result field in the reply */
job->req->sense_len = sizeof(uint32_t);
@@ -3433,13 +3427,27 @@ fc_bsg_jobdone(struct fc_bsg_job *job)
rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
rsp->resid_len);
}
+ blk_complete_request(req);
+}
+
+/**
+ * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * @req: BSG request that holds the job to be destroyed
+ */
+static void fc_bsg_softirq_done(struct request *rq)
+{
+ struct fc_bsg_job *job = rq->special;
+ unsigned long flags;
- blk_end_request_all(req, err);
+ spin_lock_irqsave(&job->job_lock, flags);
+ job->state_flags |= FC_RQST_STATE_DONE;
+ job->ref_cnt--;
+ spin_unlock_irqrestore(&job->job_lock, flags);
+ blk_end_request_all(rq, rq->errors);
fc_destroy_bsgjob(job);
}
-
/**
* fc_bsg_job_timeout - handler for when a bsg request timesout
* @req: request that timed out
@@ -3471,19 +3479,13 @@ fc_bsg_job_timeout(struct request *req)
"abort failed with status %d\n", err);
}
- if (!done) {
- spin_lock_irqsave(&job->job_lock, flags);
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
- fc_destroy_bsgjob(job);
- }
-
/* the blk_end_sync_io() doesn't check the error */
- return BLK_EH_HANDLED;
+ if (done)
+ return BLK_EH_NOT_HANDLED;
+ else
+ return BLK_EH_HANDLED;
}
-
-
static int
fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
{
@@ -3668,13 +3670,14 @@ static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
int flagset;
+ unsigned long flags;
if (!rport->rqst_q)
return;
get_device(&rport->dev);
- spin_lock(rport->rqst_q->queue_lock);
+ spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset)
@@ -3682,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
__blk_run_queue(rport->rqst_q);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
- spin_unlock(rport->rqst_q->queue_lock);
+ spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
put_device(&rport->dev);
}
@@ -3859,7 +3862,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
int err;
- char bsg_name[BUS_ID_SIZE]; /*20*/
+ char bsg_name[20];
fc_host->rqst_q = NULL;
@@ -3879,6 +3882,7 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
q->queuedata = shost;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, fc_bsg_softirq_done);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
@@ -3924,6 +3928,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
q->queuedata = rport;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, fc_bsg_softirq_done);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f3e664628d7a..783e33c65eb7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -692,6 +692,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
"Too many iscsi targets. Max "
"number of targets is %d.\n",
ISCSI_MAX_TARGET - 1);
+ err = -EOVERFLOW;
goto release_host;
}
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index d606452297cf..0895d3c71b03 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,9 +173,9 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
ret = handler(shost, rphy, req);
req->errors = ret;
- spin_lock_irq(q->queue_lock);
+ blk_end_request_all(req, ret);
- req->end_io(req, ret);
+ spin_lock_irq(q->queue_lock);
}
}
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 654a34fb04cb..c25bd9a34e02 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -46,6 +46,22 @@
#define DV_RETRIES 3 /* should only need at most
* two cc/ua clears */
+/* Our blacklist flags */
+enum {
+ SPI_BLIST_NOIUS = 0x1,
+};
+
+/* blacklist table, modelled on scsi_devinfo.c */
+static struct {
+ char *vendor;
+ char *model;
+ unsigned flags;
+} spi_static_device_list[] __initdata = {
+ {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
+ {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
+ {NULL, NULL, 0}
+};
+
/* Private data accessors (keep these out of the header file) */
#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
@@ -207,6 +223,9 @@ static int spi_device_configure(struct transport_container *tc,
{
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_target *starget = sdev->sdev_target;
+ unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+ &sdev->inquiry[16],
+ SCSI_DEVINFO_SPI);
/* Populate the target capability fields with the values
* gleaned from the device inquiry */
@@ -216,6 +235,10 @@ static int spi_device_configure(struct transport_container *tc,
spi_support_dt(starget) = scsi_device_dt(sdev);
spi_support_dt_only(starget) = scsi_device_dt_only(sdev);
spi_support_ius(starget) = scsi_device_ius(sdev);
+ if (bflags & SPI_BLIST_NOIUS) {
+ dev_info(dev, "Information Units disabled by blacklist\n");
+ spi_support_ius(starget) = 0;
+ }
spi_support_qas(starget) = scsi_device_qas(sdev);
return 0;
@@ -833,7 +856,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
return;
}
- if (!scsi_device_wide(sdev)) {
+ if (!spi_support_wide(starget)) {
spi_max_width(starget) = 0;
max_width = 0;
}
@@ -860,7 +883,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
return;
/* device can't handle synchronous */
- if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev))
+ if (!spi_support_sync(starget) && !spi_support_dt(starget))
return;
/* len == -1 is the signal that we need to ascertain the
@@ -876,13 +899,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
/* try QAS requests; this should be harmless to set if the
* target supports it */
- if (scsi_device_qas(sdev) && spi_max_qas(starget)) {
+ if (spi_support_qas(starget) && spi_max_qas(starget)) {
DV_SET(qas, 1);
} else {
DV_SET(qas, 0);
}
- if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) {
+ if (spi_support_ius(starget) && spi_max_iu(starget) &&
+ min_period < 9) {
/* This is u320 (or u640). Set IU transfers */
DV_SET(iu, 1);
/* Then set the optional parameters */
@@ -902,7 +926,7 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
i->f->get_signalling(shost);
if (spi_signalling(shost) == SPI_SIGNAL_SE ||
spi_signalling(shost) == SPI_SIGNAL_HVD ||
- !scsi_device_dt(sdev)) {
+ !spi_support_dt(starget)) {
DV_SET(dt, 0);
} else {
DV_SET(dt, 1);
@@ -1523,7 +1547,21 @@ EXPORT_SYMBOL(spi_release_transport);
static __init int spi_transport_init(void)
{
- int error = transport_class_register(&spi_transport_class);
+ int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI,
+ "SCSI Parallel Transport Class");
+ if (!error) {
+ int i;
+
+ for (i = 0; spi_static_device_list[i].vendor; i++)
+ scsi_dev_info_list_add_keyed(1, /* compatible */
+ spi_static_device_list[i].vendor,
+ spi_static_device_list[i].model,
+ NULL,
+ spi_static_device_list[i].flags,
+ SCSI_DEVINFO_SPI);
+ }
+
+ error = transport_class_register(&spi_transport_class);
if (error)
return error;
error = anon_transport_class_register(&spi_device_class);
@@ -1535,6 +1573,7 @@ static void __exit spi_transport_exit(void)
transport_class_unregister(&spi_transport_class);
anon_transport_class_unregister(&spi_device_class);
transport_class_unregister(&spi_host_class);
+ scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);
}
MODULE_AUTHOR("Martin Hicks");
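The keyed blacklist above relies on the standard INQUIRY layout (bytes 8-15 carry the vendor identification, bytes 16-31 the product identification), which is why spi_device_configure() passes &sdev->inquiry[8] and &sdev->inquiry[16] to scsi_get_device_flags_keyed(). Adding another device that needs Information Units disabled is just one more table entry; the vendor/model strings added below are hypothetical placeholders, not real hardware:

static struct {
	char *vendor;
	char *model;
	unsigned flags;
} spi_static_device_list[] __initdata = {
	{"HP",   "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
	{"IBM",  "ULTRIUM-TD3",    SPI_BLIST_NOIUS },
	{"ACME", "TAPE-X1",        SPI_BLIST_NOIUS },	/* hypothetical entry */
	{NULL, NULL, 0}
};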
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 878b17a9af30..5616cd780ff3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1307,6 +1307,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
int sense_valid = 0;
int the_result;
int retries = 3;
+ unsigned int alignment;
unsigned long long lba;
unsigned sector_size;
@@ -1358,6 +1359,16 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -EOVERFLOW;
}
+ /* Logical blocks per physical block exponent */
+ sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size;
+
+ /* Lowest aligned logical block */
+ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
+ blk_queue_alignment_offset(sdp->request_queue, alignment);
+ if (alignment && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "physical block alignment offset: %u\n", alignment);
+
sdkp->capacity = lba + 1;
return sector_size;
}
@@ -1409,6 +1420,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
}
sdkp->capacity = lba + 1;
+ sdkp->hw_sector_size = sector_size;
return sector_size;
}
@@ -1521,11 +1533,17 @@ got_data:
string_get_size(sz, STRING_UNITS_10, cap_str_10,
sizeof(cap_str_10));
- if (sdkp->first_scan || old_capacity != sdkp->capacity)
+ if (sdkp->first_scan || old_capacity != sdkp->capacity) {
sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte hardware sectors: (%s/%s)\n",
+ "%llu %d-byte logical blocks: (%s/%s)\n",
(unsigned long long)sdkp->capacity,
sector_size, cap_str_10, cap_str_2);
+
+ if (sdkp->hw_sector_size != sector_size)
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u-byte physical blocks\n",
+ sdkp->hw_sector_size);
+ }
}
/* Rescale capacity to 512-byte units */
@@ -1538,6 +1556,7 @@ got_data:
else if (sector_size == 256)
sdkp->capacity >>= 1;
+ blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size);
sdkp->device->sector_size = sector_size;
}
@@ -1776,6 +1795,52 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
}
/**
+ * sd_read_block_limits - Query disk device for preferred I/O sizes.
+ * @sdkp: disk to query
+ */
+static void sd_read_block_limits(struct scsi_disk *sdkp)
+{
+ unsigned int sector_sz = sdkp->device->sector_size;
+ char *buffer;
+
+ /* Block Limits VPD */
+ buffer = scsi_get_vpd_page(sdkp->device, 0xb0);
+
+ if (buffer == NULL)
+ return;
+
+ blk_queue_io_min(sdkp->disk->queue,
+ get_unaligned_be16(&buffer[6]) * sector_sz);
+ blk_queue_io_opt(sdkp->disk->queue,
+ get_unaligned_be32(&buffer[12]) * sector_sz);
+
+ kfree(buffer);
+}
+
+/**
+ * sd_read_block_characteristics - Query block dev. characteristics
+ * @sdkp: disk to query
+ */
+static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+{
+ char *buffer;
+ u16 rot;
+
+ /* Block Device Characteristics VPD */
+ buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
+
+ if (buffer == NULL)
+ return;
+
+ rot = get_unaligned_be16(&buffer[4]);
+
+ if (rot == 1)
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+
+ kfree(buffer);
+}
+
+/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
* @disk: struct gendisk we care about
@@ -1812,6 +1877,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
+ sd_read_block_limits(sdkp);
+ sd_read_block_characteristics(sdkp);
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
@@ -1934,6 +2001,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
add_disk(gd);
sd_dif_config_host(sdkp);
+ sd_revalidate_disk(gd);
+
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
}
@@ -2054,6 +2123,7 @@ static int sd_remove(struct device *dev)
async_synchronize_full();
sdkp = dev_get_drvdata(dev);
+ blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
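To make the byte arithmetic in read_capacity_16() concrete, here is a small user-space sketch with made-up parameter data: per SBC, the low nibble of byte 13 is the logical-blocks-per-physical-block exponent and the low 14 bits of bytes 14-15 are the lowest aligned logical block address. With 512-byte logical blocks, an exponent of 3 and a lowest aligned LBA of 1, the code above would report 4096-byte physical blocks with a 512-byte alignment offset:

#include <stdio.h>

int main(void)
{
	unsigned char buffer[32] = { 0 };
	unsigned int sector_size = 512;
	unsigned int hw_sector_size, alignment;

	buffer[13] = 0x03;	/* 2^3 = 8 logical blocks per physical block */
	buffer[14] = 0x00;
	buffer[15] = 0x01;	/* lowest aligned LBA = 1 */

	hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size;
	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;

	printf("physical block size %u, alignment offset %u\n",
	       hw_sector_size, alignment);	/* prints 4096 and 512 */
	return 0;
}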
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 708778cf5f06..8474b5bad3fe 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -45,6 +45,7 @@ struct scsi_disk {
unsigned int openers; /* protected by BKL for now, yuck */
sector_t capacity; /* size in 512-byte sectors */
u32 index;
+ unsigned short hw_sector_size;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8201387b4daa..9230402c45af 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp);
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
- struct request_queue *q = sfp->parentdp->device->request_queue;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
- return blk_verify_command(&q->cmd_filter,
- cmd, filp->f_mode & FMODE_WRITE);
+ return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
@@ -621,7 +619,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (strcmp(current->comm, cmd) && printk_ratelimit()) {
printk(KERN_WARNING
"sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
- "guessing data in;\n" KERN_WARNING " "
+ "guessing data in;\n "
"program %s not setting count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
@@ -1658,6 +1656,10 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
+ if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
+ md->from_user = 1;
+ else
+ md->from_user = 0;
}
if (iov_count) {
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index cd350dfc1216..cce0fe4c8a3b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -881,6 +881,7 @@ static int sr_remove(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
+ blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
del_gendisk(cd->disk);
mutex_lock(&sr_ref_mutex);
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index bcaba86060ab..75da6e58ce55 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -2860,8 +2860,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
*/
local_irq_restore(flags);
- printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n"
- KERN_INFO " before abortion\n", HOSTNO);
+ printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
return SCSI_ABORT_NOT_RUNNING;
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 69ad4945c936..297deb817a5d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -2321,8 +2321,9 @@ static void sym_int_par (struct sym_hcb *np, u_short sist)
int phase = cmd & 7;
struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
- printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
- sym_name(np), hsts, dbc, sbcl);
+ if (printk_ratelimit())
+ printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
+ sym_name(np), hsts, dbc, sbcl);
/*
* Check that the chip is connected to the SCSI BUS.
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 97f3158fa7b5..27e84e4b1fa9 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev)
host = ncr_attach(&zalon7xx_template, unit, &device);
if (!host)
- goto fail;
+ return -ENODEV;
if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index e371a9c15341..e7108e75653d 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -60,11 +60,12 @@ struct serial_private {
static void moan_device(const char *str, struct pci_dev *dev)
{
- printk(KERN_WARNING "%s: %s\n"
- KERN_WARNING "Please send the output of lspci -vv, this\n"
- KERN_WARNING "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
- KERN_WARNING "manufacturer and name of serial board or\n"
- KERN_WARNING "modem board to rmk+serial@arm.linux.org.uk.\n",
+ printk(KERN_WARNING
+ "%s: %s\n"
+ "Please send the output of lspci -vv, this\n"
+ "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
+ "manufacturer and name of serial board or\n"
+ "modem board to rmk+serial@arm.linux.org.uk.\n",
pci_name(dev), str, dev->vendor, dev->device,
dev->subsystem_vendor, dev->subsystem_device);
}
@@ -398,8 +399,7 @@ static int sbs_init(struct pci_dev *dev)
{
u8 __iomem *p;
- p = ioremap_nocache(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
+ p = pci_ioremap_bar(dev, 0);
if (p == NULL)
return -ENOMEM;
@@ -423,8 +423,7 @@ static void __devexit sbs_exit(struct pci_dev *dev)
{
u8 __iomem *p;
- p = ioremap_nocache(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
+ p = pci_ioremap_bar(dev, 0);
/* FIXME: What if resource_len < OCT_REG_CR_OFF */
if (p != NULL)
writeb(0, p + OCT_REG_CR_OFF);
@@ -761,6 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev)
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
+ if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return 0;
@@ -3559,6 +3560,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_VENDOR_ID_IBM, 0x0299,
0, 0, pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 1132c5cae7ab..037c1e0b7c4c 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1320,6 +1320,16 @@ config SERIAL_SGI_IOC3
If you have an SGI Altix with an IOC3 serial card,
say Y or M. Otherwise, say N.
+config SERIAL_MSM
+ bool "MSM on-chip serial port support"
+ depends on ARM && ARCH_MSM
+ select SERIAL_CORE
+
+config SERIAL_MSM_CONSOLE
+ bool "MSM serial console support"
+ depends on SERIAL_MSM=y
+ select SERIAL_CORE_CONSOLE
+
config SERIAL_NETX
tristate "NetX serial port support"
depends on ARM && ARCH_NETX
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 45a8658f54d5..d5a29981c6c4 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
+obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index e2f6b1bfac98..b4a7650af696 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -38,6 +38,10 @@
#include <asm/cacheflush.h>
#endif
+#ifdef CONFIG_SERIAL_BFIN_MODULE
+# undef CONFIG_EARLY_PRINTK
+#endif
+
/* UART name and device definitions */
#define BFIN_SERIAL_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
@@ -1110,6 +1114,7 @@ static void __init bfin_serial_init_ports(void)
bfin_serial_hw_init();
for (i = 0; i < nr_active_ports; i++) {
+ spin_lock_init(&bfin_serial_ports[i].port.lock);
bfin_serial_ports[i].port.uartclk = get_sclk();
bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
bfin_serial_ports[i].port.ops = &bfin_serial_pops;
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 9f2891c2c4a2..cd1b6a45bb82 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1548,8 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
goto probe_exit1;
}
- icom_adapter->base_addr = ioremap(icom_adapter->base_addr_pci,
- pci_resource_len(dev, 0));
+ icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
if (!icom_adapter->base_addr)
goto probe_exit1;
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 107ce2e187b8..00f4577d2f7f 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -467,7 +467,7 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd)
printk(KERN_INFO "jsm: linemap is full, added device failed\n");
continue;
} else
- set_bit((int)line, linemap);
+ set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
printk(KERN_INFO "jsm: add device failed\n");
@@ -503,7 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd)
ch = brd->channels[i];
- clear_bit((int)(ch->uart_port.line), linemap);
+ clear_bit(ch->uart_port.line, linemap);
uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
}
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
new file mode 100644
index 000000000000..698048f64f5e
--- /dev/null
+++ b/drivers/serial/msm_serial.c
@@ -0,0 +1,772 @@
+/*
+ * drivers/serial/msm_serial.c - driver for msm7k serial device and console
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+# define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include "msm_serial.h"
+
+struct msm_port {
+ struct uart_port uart;
+ char name[16];
+ struct clk *clk;
+ unsigned int imr;
+};
+
+#define UART_TO_MSM(uart_port) ((struct msm_port *) (uart_port))
+
+static inline void msm_write(struct uart_port *port, unsigned int val,
+ unsigned int off)
+{
+ __raw_writel(val, port->membase + off);
+}
+
+static inline unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+ return __raw_readl(port->membase + off);
+}
+
+static void msm_stop_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_start_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_stop_rx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_enable_ms(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void handle_rx(struct uart_port *port)
+{
+ struct tty_struct *tty = port->info->port.tty;
+ unsigned int sr;
+
+ /*
+ * Handle overrun. My understanding of the hardware is that overrun
+ * is not tied to the RX buffer, so we handle the case out of band.
+ */
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ /* and now the main RX loop */
+ while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ unsigned int c;
+ char flag = TTY_NORMAL;
+
+ c = msm_read(port, UART_RF);
+
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ port->icount.frame++;
+ } else {
+ port->icount.rx++;
+ }
+
+ /* Mask conditions we're ignoring. */
+ sr &= port->read_status_mask;
+
+ if (sr & UART_SR_RX_BREAK) {
+ flag = TTY_BREAK;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ flag = TTY_FRAME;
+ }
+
+ if (!uart_handle_sysrq_char(port, c))
+ tty_insert_flip_char(tty, c, flag);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if (port->x_char) {
+ msm_write(port, port->x_char, UART_TF);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+
+ while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
+ if (uart_circ_empty(xmit)) {
+ /* disable tx interrupts */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+ break;
+ }
+
+ msm_write(port, xmit->buf[xmit->tail], UART_TF);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ port->icount.cts++;
+ wake_up_interruptible(&port->info->delta_msr_wait);
+}
+
+static irqreturn_t msm_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int misr;
+
+ spin_lock(&port->lock);
+ misr = msm_read(port, UART_MISR);
+ msm_write(port, 0, UART_IMR); /* disable interrupt */
+
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
+ handle_rx(port);
+ if (misr & UART_IMR_TXLEV)
+ handle_tx(port);
+ if (misr & UART_IMR_DELTA_CTS)
+ handle_delta_cts(port);
+
+ msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int msm_tx_empty(struct uart_port *port)
+{
+ return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int msm_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
+}
+
+static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
+
+ mr = msm_read(port, UART_MR1);
+
+ if (!(mctrl & TIOCM_RTS)) {
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ } else {
+ mr |= UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ }
+}
+
+static void msm_break_ctl(struct uart_port *port, int break_ctl)
+{
+ if (break_ctl)
+ msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ else
+ msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+}
+
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+ unsigned int baud_code, rxstale, watermark;
+
+ switch (baud) {
+ case 300:
+ baud_code = UART_CSR_300;
+ rxstale = 1;
+ break;
+ case 600:
+ baud_code = UART_CSR_600;
+ rxstale = 1;
+ break;
+ case 1200:
+ baud_code = UART_CSR_1200;
+ rxstale = 1;
+ break;
+ case 2400:
+ baud_code = UART_CSR_2400;
+ rxstale = 1;
+ break;
+ case 4800:
+ baud_code = UART_CSR_4800;
+ rxstale = 1;
+ break;
+ case 9600:
+ baud_code = UART_CSR_9600;
+ rxstale = 2;
+ break;
+ case 14400:
+ baud_code = UART_CSR_14400;
+ rxstale = 3;
+ break;
+ case 19200:
+ baud_code = UART_CSR_19200;
+ rxstale = 4;
+ break;
+ case 28800:
+ baud_code = UART_CSR_28800;
+ rxstale = 6;
+ break;
+ case 38400:
+ baud_code = UART_CSR_38400;
+ rxstale = 8;
+ break;
+ case 57600:
+ baud_code = UART_CSR_57600;
+ rxstale = 16;
+ break;
+ case 115200:
+ default:
+ baud_code = UART_CSR_115200;
+ baud = 115200;
+ rxstale = 31;
+ break;
+ }
+
+ msm_write(port, baud_code, UART_CSR);
+
+ /* RX stale watermark */
+ watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark |= UART_IPR_RXSTALE_LAST;
+ watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ msm_write(port, watermark, UART_IPR);
+
+ /* set RX watermark */
+ watermark = (port->fifosize * 3) / 4;
+ msm_write(port, watermark, UART_RFWR);
+
+ /* set TX watermark */
+ msm_write(port, 10, UART_TFWR);
+
+ return baud;
+}
+
+static void msm_reset(struct uart_port *port)
+{
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+
+static void msm_init_clock(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ clk_enable(msm_port->clk);
+
+ msm_write(port, 0xC0, UART_MREG);
+ msm_write(port, 0xB2, UART_NREG);
+ msm_write(port, 0x7D, UART_DREG);
+ msm_write(port, 0x1C, UART_MNDREG);
+}
+
+static int msm_startup(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int data, rfr_level;
+ int ret;
+
+ snprintf(msm_port->name, sizeof(msm_port->name),
+ "msm_serial%d", port->line);
+
+ ret = request_irq(port->irq, msm_irq, IRQF_TRIGGER_HIGH,
+ msm_port->name, port);
+ if (unlikely(ret))
+ return ret;
+
+ msm_init_clock(port);
+
+ if (likely(port->fifosize > 12))
+ rfr_level = port->fifosize - 12;
+ else
+ rfr_level = port->fifosize;
+
+ /* set automatic RFR level */
+ data = msm_read(port, UART_MR1);
+ data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+ data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, UART_MR1);
+
+ /* make sure that RXSTALE count is non-zero */
+ data = msm_read(port, UART_IPR);
+ if (unlikely(!data)) {
+ data |= UART_IPR_RXSTALE_LAST;
+ data |= UART_IPR_STALE_LSB;
+ msm_write(port, data, UART_IPR);
+ }
+
+ msm_reset(port);
+
+ msm_write(port, 0x05, UART_CR); /* enable TX & RX */
+
+ /* turn on RX and CTS interrupts */
+ msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
+ UART_IMR_CURRENT_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ return 0;
+}
+
+static void msm_shutdown(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr = 0;
+ msm_write(port, 0, UART_IMR); /* disable interrupts */
+
+ clk_disable(msm_port->clk);
+
+ free_irq(port->irq, port);
+}
+
+static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, mr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* calculate and set baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 300, 115200);
+ baud = msm_set_baud_rate(port, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /* calculate parity */
+ mr = msm_read(port, UART_MR2);
+ mr &= ~UART_MR2_PARITY_MODE;
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & PARODD)
+ mr |= UART_MR2_PARITY_MODE_ODD;
+ else if (termios->c_cflag & CMSPAR)
+ mr |= UART_MR2_PARITY_MODE_SPACE;
+ else
+ mr |= UART_MR2_PARITY_MODE_EVEN;
+ }
+
+ /* calculate bits per char */
+ mr &= ~UART_MR2_BITS_PER_CHAR;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ mr |= UART_MR2_BITS_PER_CHAR_5;
+ break;
+ case CS6:
+ mr |= UART_MR2_BITS_PER_CHAR_6;
+ break;
+ case CS7:
+ mr |= UART_MR2_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mr |= UART_MR2_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* calculate stop bits */
+ mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ if (termios->c_cflag & CSTOPB)
+ mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ else
+ mr |= UART_MR2_STOP_BIT_LEN_ONE;
+
+ /* set parity, bits per char, and stop bit */
+ msm_write(port, mr, UART_MR2);
+
+ /* calculate and set hardware flow control */
+ mr = msm_read(port, UART_MR1);
+ mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ if (termios->c_cflag & CRTSCTS) {
+ mr |= UART_MR1_CTS_CTL;
+ mr |= UART_MR1_RX_RDY_CTL;
+ }
+ msm_write(port, mr, UART_MR1);
+
+ /* Configure status bits to ignore based on termio flags. */
+ port->read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_SR_RX_BREAK;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *msm_type(struct uart_port *port)
+{
+ return "MSM";
+}
+
+static void msm_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return;
+ size = resource->end - resource->start + 1;
+
+ release_mem_region(port->mapbase, size);
+ iounmap(port->membase);
+ port->membase = NULL;
+}
+
+static int msm_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ size = resource->end - resource->start + 1;
+
+ if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
+ return -EBUSY;
+
+ port->membase = ioremap(port->mapbase, size);
+ if (!port->membase) {
+ release_mem_region(port->mapbase, size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void msm_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_MSM;
+ msm_request_port(port);
+ }
+}
+
+static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
+ return -EINVAL;
+ if (unlikely(port->irq != ser->irq))
+ return -EINVAL;
+ return 0;
+}
+
+static void msm_power(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ switch (state) {
+ case 0:
+ clk_enable(msm_port->clk);
+ break;
+ case 3:
+ clk_disable(msm_port->clk);
+ break;
+ default:
+ printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
+ }
+}
+
+static struct uart_ops msm_uart_pops = {
+ .tx_empty = msm_tx_empty,
+ .set_mctrl = msm_set_mctrl,
+ .get_mctrl = msm_get_mctrl,
+ .stop_tx = msm_stop_tx,
+ .start_tx = msm_start_tx,
+ .stop_rx = msm_stop_rx,
+ .enable_ms = msm_enable_ms,
+ .break_ctl = msm_break_ctl,
+ .startup = msm_startup,
+ .shutdown = msm_shutdown,
+ .set_termios = msm_set_termios,
+ .type = msm_type,
+ .release_port = msm_release_port,
+ .request_port = msm_request_port,
+ .config_port = msm_config_port,
+ .verify_port = msm_verify_port,
+ .pm = msm_power,
+};
+
+static struct msm_port msm_uart_ports[] = {
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 0,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 1,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 64,
+ .line = 2,
+ },
+ },
+};
+
+#define UART_NR ARRAY_SIZE(msm_uart_ports)
+
+static inline struct uart_port *get_port_from_line(unsigned int line)
+{
+ return &msm_uart_ports[line].uart;
+}
+
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+
+static void msm_console_putchar(struct uart_port *port, int c)
+{
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ ;
+ msm_write(port, c, UART_TF);
+}
+
+static void msm_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port;
+ struct msm_port *msm_port;
+
+ BUG_ON(co->index < 0 || co->index >= UART_NR);
+
+ port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
+
+ spin_lock(&port->lock);
+ uart_console_write(port, s, count, msm_console_putchar);
+ spin_unlock(&port->lock);
+}
+
+static int __init msm_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 0, flow, bits, parity;	/* baud 0: fall back to 115200 below */
+
+ if (unlikely(co->index >= UART_NR || co->index < 0))
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+
+ if (unlikely(!port->membase))
+ return -ENXIO;
+
+ port->cons = co;
+
+ msm_init_clock(port);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ bits = 8;
+ parity = 'n';
+ flow = 'n';
+ msm_write(port, UART_MR2_BITS_PER_CHAR_8 | UART_MR2_STOP_BIT_LEN_ONE,
+ UART_MR2); /* 8N1 */
+
+ if (baud < 300 || baud > 115200)
+ baud = 115200;
+ msm_set_baud_rate(port, baud);
+
+ msm_reset(port);
+
+ printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver msm_uart_driver;
+
+static struct console msm_console = {
+ .name = "ttyMSM",
+ .write = msm_console_write,
+ .device = uart_console_device,
+ .setup = msm_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &msm_uart_driver,
+};
+
+#define MSM_CONSOLE (&msm_console)
+
+#else
+#define MSM_CONSOLE NULL
+#endif
+
+static struct uart_driver msm_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "msm_serial",
+ .dev_name = "ttyMSM",
+ .nr = UART_NR,
+ .cons = MSM_CONSOLE,
+};
+
+static int __init msm_serial_probe(struct platform_device *pdev)
+{
+ struct msm_port *msm_port;
+ struct resource *resource;
+ struct uart_port *port;
+ int irq;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
+
+ printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);
+
+ port = get_port_from_line(pdev->id);
+ port->dev = &pdev->dev;
+ msm_port = UART_TO_MSM(port);
+
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ if (unlikely(IS_ERR(msm_port->clk)))
+ return PTR_ERR(msm_port->clk);
+ port->uartclk = clk_get_rate(msm_port->clk);
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ port->mapbase = resource->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (unlikely(irq < 0))
+ return -ENXIO;
+ port->irq = irq;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&msm_uart_driver, port);
+}
+
+static int __devexit msm_serial_remove(struct platform_device *pdev)
+{
+ struct msm_port *msm_port = platform_get_drvdata(pdev);
+
+ clk_put(msm_port->clk);
+
+ return 0;
+}
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_serial_probe,
+ .remove = msm_serial_remove,
+ .driver = {
+ .name = "msm_serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&msm_uart_driver);
+ if (unlikely(ret))
+ return ret;
+
+ ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
+ if (unlikely(ret))
+ uart_unregister_driver(&msm_uart_driver);
+
+ printk(KERN_INFO "msm_serial: driver initialized\n");
+
+ return ret;
+}
+
+static void __exit msm_serial_exit(void)
+{
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ unregister_console(&msm_console);
+#endif
+ platform_driver_unregister(&msm_platform_driver);
+ uart_unregister_driver(&msm_uart_driver);
+}
+
+module_init(msm_serial_init);
+module_exit(msm_serial_exit);
+
+MODULE_AUTHOR("Robert Love <rlove@google.com>");
+MODULE_DESCRIPTION("Driver for msm7x serial device");
+MODULE_LICENSE("GPL");
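The probe path above expects a platform device named "msm_serial" whose id selects the port (ttyMSM<id>), one IORESOURCE_MEM resource for the register window, one interrupt, and a clock that clk_get() can find under the name "uart_clk". A board-file sketch follows; the base address and IRQ number are placeholders, not real MSM7x00 values:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource msm_uart0_resources[] = {
	{
		.start	= 0xa9a00000,		/* placeholder UART base */
		.end	= 0xa9a00fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 11,			/* placeholder UART IRQ */
		.end	= 11,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device msm_uart0_device = {
	.name		= "msm_serial",
	.id		= 0,			/* becomes port->line 0, i.e. ttyMSM0 */
	.num_resources	= ARRAY_SIZE(msm_uart0_resources),
	.resource	= msm_uart0_resources,
};

static int __init board_add_msm_uart(void)
{
	return platform_device_register(&msm_uart0_device);
}
arch_initcall(board_add_msm_uart);

With CONFIG_SERIAL_MSM=y and CONFIG_SERIAL_MSM_CONSOLE=y from the Kconfig hunk earlier, console=ttyMSM0,115200n8 on the kernel command line should then select this port as the console (the "ttyMSM" name comes from msm_console above).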
diff --git a/drivers/serial/msm_serial.h b/drivers/serial/msm_serial.h
new file mode 100644
index 000000000000..689f1fa0e84e
--- /dev/null
+++ b/drivers/serial/msm_serial.h
@@ -0,0 +1,117 @@
+/*
+ * drivers/serial/msm_serial.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
+#define __DRIVERS_SERIAL_MSM_SERIAL_H
+
+#define UART_MR1 0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define UART_MR1_RX_RDY_CTL (1 << 7)
+#define UART_MR1_CTS_CTL (1 << 6)
+
+#define UART_MR2 0x0004
+#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_BITS_PER_CHAR 0x30
+#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE 0x0
+#define UART_MR2_PARITY_MODE_ODD 0x1
+#define UART_MR2_PARITY_MODE_EVEN 0x2
+#define UART_MR2_PARITY_MODE_SPACE 0x3
+#define UART_MR2_PARITY_MODE 0x3
+
+#define UART_CSR 0x0008
+#define UART_CSR_115200 0xFF
+#define UART_CSR_57600 0xEE
+#define UART_CSR_38400 0xDD
+#define UART_CSR_28800 0xCC
+#define UART_CSR_19200 0xBB
+#define UART_CSR_14400 0xAA
+#define UART_CSR_9600 0x99
+#define UART_CSR_4800 0x77
+#define UART_CSR_2400 0x55
+#define UART_CSR_1200 0x44
+#define UART_CSR_600 0x33
+#define UART_CSR_300 0x22
+
+#define UART_TF 0x000C
+
+#define UART_CR 0x0010
+#define UART_CR_CMD_NULL (0 << 4)
+#define UART_CR_CMD_RESET_RX (1 << 4)
+#define UART_CR_CMD_RESET_TX (2 << 4)
+#define UART_CR_CMD_RESET_ERR (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define UART_CR_CMD_START_BREAK (5 << 4)
+#define UART_CR_CMD_STOP_BREAK (6 << 4)
+#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_PACKET_MODE (9 << 4)
+#define UART_CR_CMD_MODE_RESET (12 << 4)
+#define UART_CR_CMD_SET_RFR (13 << 4)
+#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_TX_DISABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 2)
+#define UART_CR_RX_DISABLE (1 << 1)
+#define UART_CR_RX_ENABLE (1 << 0)
+
+#define UART_IMR 0x0014
+#define UART_IMR_TXLEV (1 << 0)
+#define UART_IMR_RXSTALE (1 << 3)
+#define UART_IMR_RXLEV (1 << 4)
+#define UART_IMR_DELTA_CTS (1 << 5)
+#define UART_IMR_CURRENT_CTS (1 << 6)
+
+#define UART_IPR_RXSTALE_LAST 0x20
+#define UART_IPR_STALE_LSB 0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+
+#define UART_IPR 0x0018
+#define UART_TFWR 0x001C
+#define UART_RFWR 0x0020
+#define UART_HCR 0x0024
+
+#define UART_MREG 0x0028
+#define UART_NREG 0x002C
+#define UART_DREG 0x0030
+#define UART_MNDREG 0x0034
+#define UART_IRDA 0x0038
+#define UART_MISR_MODE 0x0040
+#define UART_MISR_RESET 0x0044
+#define UART_MISR_EXPORT 0x0048
+#define UART_MISR_VAL 0x004C
+#define UART_TEST_CTRL 0x0050
+
+#define UART_SR 0x0008
+#define UART_SR_HUNT_CHAR (1 << 7)
+#define UART_SR_RX_BREAK (1 << 6)
+#define UART_SR_PAR_FRAME_ERR (1 << 5)
+#define UART_SR_OVERRUN (1 << 4)
+#define UART_SR_TX_EMPTY (1 << 3)
+#define UART_SR_TX_READY (1 << 2)
+#define UART_SR_RX_FULL (1 << 1)
+#define UART_SR_RX_READY (1 << 0)
+
+#define UART_RF 0x000C
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+
+#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
index 4873f2978bd2..fb00ed5296e6 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/serial/s3c2400.c
@@ -78,7 +78,7 @@ static int s3c2400_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2400_serial_drv = {
.probe = s3c2400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 87c182ef71b8..b5d7cbcba2ae 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -90,7 +90,7 @@ static int s3c2410_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2410_serial_drv = {
.probe = s3c2410_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2410-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
index fd017b375568..11dcb90bdfef 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/serial/s3c2412.c
@@ -123,7 +123,7 @@ static int s3c2412_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2412_serial_drv = {
.probe = s3c2412_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2412-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 29cbb0afef8e..06c5b0cc47a3 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -153,7 +153,7 @@ static int s3c2440_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2440_serial_drv = {
.probe = s3c2440_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2440-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c
index ebf2fd3c8f7d..786a067d62ac 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/serial/s3c24a0.c
@@ -94,7 +94,7 @@ static int s3c24a0_serial_probe(struct platform_device *dev)
static struct platform_driver s3c24a0_serial_drv = {
.probe = s3c24a0_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c24a0-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c
index 3e3785233682..48f1a3781f0d 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/serial/s3c6400.c
@@ -124,7 +124,7 @@ static int s3c6400_serial_probe(struct platform_device *dev)
static struct platform_driver s3c6400_serial_drv = {
.probe = s3c6400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c6400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 93b5d75db126..c8851a0db63a 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -1174,7 +1174,7 @@ int s3c24xx_serial_probe(struct platform_device *dev,
EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
-int s3c24xx_serial_remove(struct platform_device *dev)
+int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
index 7afb94843a08..d3fe315969f6 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/serial/samsung.h
@@ -72,7 +72,7 @@ struct s3c24xx_uart_port {
extern int s3c24xx_serial_probe(struct platform_device *dev,
struct s3c24xx_uart_info *uart);
-extern int s3c24xx_serial_remove(struct platform_device *dev);
+extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
struct s3c24xx_uart_info *uart);
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
index a4fb343a08da..319e8b83f6be 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/serial/sb1250-duart.c
@@ -204,7 +204,7 @@ static int sbd_receive_drain(struct sbd_port *sport)
{
int loops = 10000;
- while (sbd_receive_ready(sport) && loops--)
+ while (sbd_receive_ready(sport) && --loops)
read_sbdchn(sport, R_DUART_RX_HOLD);
return loops;
}
@@ -218,7 +218,7 @@ static int __maybe_unused sbd_transmit_drain(struct sbd_port *sport)
{
int loops = 10000;
- while (!sbd_transmit_ready(sport) && loops--)
+ while (!sbd_transmit_ready(sport) && --loops)
udelay(2);
return loops;
}
@@ -232,7 +232,7 @@ static int sbd_line_drain(struct sbd_port *sport)
{
int loops = 10000;
- while (!sbd_transmit_empty(sport) && loops--)
+ while (!sbd_transmit_empty(sport) && --loops)
udelay(2);
return loops;
}
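The post- to pre-decrement changes here (and in zs.c further down) are more than style: these drain helpers return loops so callers can treat a zero return as a timeout, but with loops-- a full timeout exits the loop with the counter at -1, so a check of the form !sbd_receive_drain(sport) never fires. A stand-alone sketch of the difference, independent of the driver:

#include <stdio.h>

static int device_never_drains(void)
{
	return 1;	/* simulate a FIFO that never empties */
}

static int drain_post(void)
{
	int loops = 10000;

	while (device_never_drains() && loops--)
		;
	return loops;	/* -1 on timeout: "!drain_post()" misses it */
}

static int drain_pre(void)
{
	int loops = 10000;

	while (device_never_drains() && --loops)
		;
	return loops;	/* 0 on timeout: the check works as intended */
}

int main(void)
{
	printf("post-decrement: %d, pre-decrement: %d\n",
	       drain_post(), drain_pre());
	return 0;
}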
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 7313c2edcb83..54dd16d66a4b 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -461,6 +461,94 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_txx9_port *up)
+{
+ unsigned int tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ while (--tmout &&
+ !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
+ udelay(1);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
+ udelay(1);
+ }
+}
+#endif
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial_txx9_get_poll_char(struct uart_port *port)
+{
+ unsigned int ier;
+ unsigned char c;
+ struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
+ ;
+
+ c = sio_in(up, TXX9_SIRFIFO);
+
+ /*
+ * Finally, clear RX interrupt status
+ * and restore the IER
+ */
+ sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
+ sio_out(up, TXX9_SIDICR, ier);
+ return c;
+}
+
+
+static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
+{
+ unsigned int ier;
+ struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ wait_for_xmitr(up);
+ /*
+ * Send the character out.
+ * If a LF, also do CR...
+ */
+ sio_out(up, TXX9_SITFIFO, c);
+ if (c == 10) {
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SITFIFO, 13);
+ }
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SIDICR, ier);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
static int serial_txx9_startup(struct uart_port *port)
{
struct uart_txx9_port *up = (struct uart_txx9_port *)port;
@@ -781,6 +869,10 @@ static struct uart_ops serial_txx9_pops = {
.release_port = serial_txx9_release_port,
.request_port = serial_txx9_request_port,
.config_port = serial_txx9_config_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = serial_txx9_get_poll_char,
+ .poll_put_char = serial_txx9_put_poll_char,
+#endif
};
static struct uart_txx9_port serial_txx9_ports[UART_NR];
@@ -803,27 +895,6 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
-/*
- * Wait for transmitter & holding register to empty
- */
-static inline void wait_for_xmitr(struct uart_txx9_port *up)
-{
- unsigned int tmout = 10000;
-
- /* Wait up to 10ms for the character(s) to be sent. */
- while (--tmout &&
- !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
- udelay(1);
-
- /* Wait up to 1s for flow control if necessary */
- if (up->port.flags & UPF_CONS_FLOW) {
- tmout = 1000000;
- while (--tmout &&
- (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
- udelay(1);
- }
-}
-
static void serial_txx9_console_putchar(struct uart_port *port, int ch)
{
struct uart_txx9_port *up = (struct uart_txx9_port *)port;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 66f52674ca0c..8e2feb563347 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -707,24 +707,25 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
- unsigned short ssr_status, scr_status;
+ unsigned short ssr_status, scr_status, err_enabled;
struct uart_port *port = ptr;
irqreturn_t ret = IRQ_NONE;
ssr_status = sci_in(port, SCxSR);
scr_status = sci_in(port, SCSCR);
+ err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
/* Tx Interrupt */
- if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE))
+ if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE))
ret = sci_tx_interrupt(irq, ptr);
/* Rx Interrupt */
- if ((ssr_status & 0x0002) && (scr_status & SCI_CTRL_FLAGS_RIE))
+ if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE))
ret = sci_rx_interrupt(irq, ptr);
/* Error Interrupt */
- if ((ssr_status & 0x0080) && (scr_status & SCI_CTRL_FLAGS_REIE))
+ if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
ret = sci_er_interrupt(irq, ptr);
/* Break Interrupt */
- if ((ssr_status & 0x0010) && (scr_status & SCI_CTRL_FLAGS_REIE))
+ if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
ret = sci_br_interrupt(irq, ptr);
return ret;
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index a94a2ab4b571..1df5325faab2 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -461,7 +461,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
break;
udelay(1);
}
- if (limit <= 0)
+ if (limit < 0)
break;
page_bytes -= written;
ra += written;
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index ac9e5d5f742e..063a313b755c 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -33,29 +33,29 @@ struct timbuart_port {
struct uart_port port;
struct tasklet_struct tasklet;
int usedma;
- u8 last_ier;
+ u32 last_ier;
struct platform_device *dev;
};
static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
921600, 1843200, 3250000};
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier);
static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
static void timbuart_stop_rx(struct uart_port *port)
{
/* spin lock held by upper layer, disable all RX interrupts */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_stop_tx(struct uart_port *port)
{
/* spinlock held by upper layer, disable TX interrupt */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_start_tx(struct uart_port *port)
@@ -72,14 +72,14 @@ static void timbuart_flush_buffer(struct uart_port *port)
u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
iowrite8(ctl, port->membase + TIMBUART_CTRL);
- iowrite8(TXBF, port->membase + TIMBUART_ISR);
+ iowrite32(TXBF, port->membase + TIMBUART_ISR);
}
static void timbuart_rx_chars(struct uart_port *port)
{
struct tty_struct *tty = port->info->port.tty;
- while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
+ while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
port->icount.rx++;
tty_insert_flip_char(tty, ch, TTY_NORMAL);
@@ -97,7 +97,7 @@ static void timbuart_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->info->xmit;
- while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
+ while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
!uart_circ_empty(xmit)) {
iowrite8(xmit->buf[xmit->tail],
port->membase + TIMBUART_TXFIFO);
@@ -114,7 +114,7 @@ static void timbuart_tx_chars(struct uart_port *port)
ioread8(port->membase + TIMBUART_BAUDRATE));
}
-static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
@@ -129,7 +129,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
if (isr & TXFLAGS) {
timbuart_tx_chars(port);
/* clear all TX interrupts */
- iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -148,7 +148,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
-void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
+void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
if (isr & RXFLAGS) {
/* Some RX status is set */
@@ -161,7 +161,7 @@ void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
timbuart_rx_chars(port);
/* ack all RX interrupts */
- iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
}
/* always have the RX interrupts enabled */
@@ -173,11 +173,11 @@ void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
void timbuart_tasklet(unsigned long arg)
{
struct timbuart_port *uart = (struct timbuart_port *)arg;
- u8 isr, ier = 0;
+ u32 isr, ier = 0;
spin_lock(&uart->port.lock);
- isr = ioread8(uart->port.membase + TIMBUART_ISR);
+ isr = ioread32(uart->port.membase + TIMBUART_ISR);
dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
if (!uart->usedma)
@@ -188,7 +188,7 @@ void timbuart_tasklet(unsigned long arg)
if (!uart->usedma)
timbuart_handle_rx_port(&uart->port, isr, &ier);
- iowrite8(ier, uart->port.membase + TIMBUART_IER);
+ iowrite32(ier, uart->port.membase + TIMBUART_IER);
spin_unlock(&uart->port.lock);
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
@@ -196,9 +196,9 @@ void timbuart_tasklet(unsigned long arg)
static unsigned int timbuart_tx_empty(struct uart_port *port)
{
- u8 isr = ioread8(port->membase + TIMBUART_ISR);
+ u32 isr = ioread32(port->membase + TIMBUART_ISR);
- return (isr & TXBAE) ? TIOCSER_TEMT : 0;
+ return (isr & TXBE) ? TIOCSER_TEMT : 0;
}
static unsigned int timbuart_get_mctrl(struct uart_port *port)
@@ -222,13 +222,13 @@ static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
}
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
{
unsigned int cts;
if (isr & CTS_DELTA) {
/* ack */
- iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
+ iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR);
cts = timbuart_get_mctrl(port);
uart_handle_cts_change(port, cts & TIOCM_CTS);
wake_up_interruptible(&port->info->delta_msr_wait);
@@ -255,9 +255,9 @@ static int timbuart_startup(struct uart_port *port)
dev_dbg(port->dev, "%s\n", __func__);
iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
- iowrite8(0xff, port->membase + TIMBUART_ISR);
+ iowrite32(0x1ff, port->membase + TIMBUART_ISR);
/* Enable all but TX interrupts */
- iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
+ iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA,
port->membase + TIMBUART_IER);
return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
@@ -270,7 +270,7 @@ static void timbuart_shutdown(struct uart_port *port)
container_of(port, struct timbuart_port, port);
dev_dbg(port->dev, "%s\n", __func__);
free_irq(port->irq, uart);
- iowrite8(0, port->membase + TIMBUART_IER);
+ iowrite32(0, port->membase + TIMBUART_IER);
}
static int get_bindex(int baud)
@@ -359,10 +359,10 @@ static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
struct timbuart_port *uart = (struct timbuart_port *)devid;
if (ioread8(uart->port.membase + TIMBUART_IPR)) {
- uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
+ uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
/* disable interrupts, the tasklet enables them again */
- iowrite8(0, uart->port.membase + TIMBUART_IER);
+ iowrite32(0, uart->port.membase + TIMBUART_IER);
/* fire off bottom half */
tasklet_schedule(&uart->tasklet);
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c
index 0573f3b5175e..dac550e57c29 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/serial/vr41xx_siu.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2004-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* Based on drivers/serial/8250.c, by Russell King.
*
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
index 9e6a873f8203..d8c2809b1ab6 100644
--- a/drivers/serial/zs.c
+++ b/drivers/serial/zs.c
@@ -231,7 +231,7 @@ static int zs_receive_drain(struct zs_port *zport)
{
int loops = 10000;
- while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--)
+ while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops)
read_zsdata(zport);
return loops;
}
@@ -241,7 +241,7 @@ static int zs_transmit_drain(struct zs_port *zport, int irq)
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) {
+ while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
@@ -254,7 +254,7 @@ static int zs_line_drain(struct zs_port *zport, int irq)
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) {
+ while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index aa90ddb37066..8980a5640bd9 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
master->setup = uwire_setup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 2a5abc08e857..f1db395dd889 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
+ int do_setup = -1;
+ int (*setup_transfer)(struct spi_device *,
+ struct spi_transfer *);
+
+ setup_transfer = bitbang->setup_transfer;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work)
unsigned tmp;
unsigned cs_change;
int status;
- int (*setup_transfer)(struct spi_device *,
- struct spi_transfer *);
m = container_of(bitbang->queue.next, struct spi_message,
queue);
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work)
tmp = 0;
cs_change = 1;
status = 0;
- setup_transfer = NULL;
list_for_each_entry (t, &m->transfers, transfer_list) {
- /* override or restore speed and wordsize */
- if (t->speed_hz || t->bits_per_word) {
- setup_transfer = bitbang->setup_transfer;
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
if (!setup_transfer) {
status = -ENOPROTOOPT;
break;
}
- }
- if (setup_transfer) {
status = setup_transfer(spi, t);
if (status < 0)
break;
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work)
m->status = status;
m->complete(m->context);
- /* restore speed and wordsize */
- if (setup_transfer)
+ /* restore speed and wordsize if it was overridden */
+ if (do_setup == 1)
setup_transfer(spi, NULL);
+ do_setup = 0;
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
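The do_setup variable introduced above is a three-state flag: -1 means the per-message defaults have not been applied yet, 1 means the current transfer overrides speed or word size (so the defaults must be restored after the message), and 0 means the current settings can simply be reused. A minimal userspace model of that state machine, with hypothetical transfer values and assuming setup_transfer() behaves as in the driver:

#include <stdio.h>

struct xfer { unsigned speed_hz; unsigned bits_per_word; };

static int do_setup = -1;       /* persists across messages, as in bitbang_work() */

static void run_message(const struct xfer *t, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (t[i].speed_hz || t[i].bits_per_word)
                        do_setup = 1;

                if (do_setup != 0)
                        printf("  xfer %d: setup_transfer()\n", i);

                /* ... shift the data here ... */
        }

        if (do_setup == 1)
                printf("  restore defaults, setup_transfer(spi, NULL)\n");
        do_setup = 0;
}

int main(void)
{
        struct xfer m1[] = { {0, 0}, {500000, 0} };     /* second xfer overrides speed */
        struct xfer m2[] = { {0, 0} };                  /* plain transfer, no override */

        printf("message 1:\n"); run_message(m1, 2);
        printf("message 2:\n"); run_message(m2, 1);
        return 0;
}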
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d869c4d3eb2..606e7a40a8da 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
/* Bit masks for spi_device.mode management. Note that incorrect
- * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other
- * devices on a shared bus: CS_HIGH, because this device will be
- * active when it shouldn't be; 3WIRE, because when active it won't
- * behave as it should.
+ * settings for some of these mode bits can cause *lots* of trouble for other
+ * devices on a shared bus:
*
- * REVISIT should changing those two modes be privileged?
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
- | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP)
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY)
struct spidev_data {
dev_t devt;
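With SPI_NO_CS and SPI_READY added, SPI_MODE_MASK is what lets spidev accept the new flags while still refusing anything it does not recognize. A standalone sketch of that mask check (the bit values mirror the spidev header, but treat the whole snippet as illustrative, not the driver's ioctl path):

#include <stdio.h>
#include <errno.h>

#define SPI_CPHA        0x01
#define SPI_CPOL        0x02
#define SPI_CS_HIGH     0x04
#define SPI_LSB_FIRST   0x08
#define SPI_3WIRE       0x10
#define SPI_LOOP        0x20
#define SPI_NO_CS       0x40
#define SPI_READY       0x80

#define SPI_MODE_MASK   (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
                         | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
                         | SPI_NO_CS | SPI_READY)

static int set_mode(unsigned int requested)
{
        if (requested & ~SPI_MODE_MASK)
                return -EINVAL;         /* unknown/unsupported flag */
        return 0;
}

int main(void)
{
        printf("NO_CS request -> %d\n", set_mode(SPI_CPOL | SPI_NO_CS)); /* 0, now accepted */
        printf("unknown bit   -> %d\n", set_mode(0x100));                /* -EINVAL */
        return 0;
}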
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 3fd3e3b412b6..3c6feed46f6e 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -49,29 +49,54 @@ static const u32 ipsflag_irq_shift[] = {
static inline u32 ssb_irqflag(struct ssb_device *dev)
{
- return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG;
+ u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG);
+ if (tpsflag)
+ return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG;
+ else
+ /* IRQ not supported */
+ return 0x3f;
+}
+
+static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag)
+{
+ struct ssb_bus *bus = rdev->bus;
+ int i;
+ for (i = 0; i < bus->nr_devices; i++) {
+ struct ssb_device *dev;
+ dev = &(bus->devices[i]);
+ if (ssb_irqflag(dev) == irqflag)
+ return dev;
+ }
+ return NULL;
}
/* Get the MIPS IRQ assignment for a specified device.
* If unassigned, 0 is returned.
+ * If disabled, 5 is returned.
+ * If not supported, 6 is returned.
*/
unsigned int ssb_mips_irq(struct ssb_device *dev)
{
struct ssb_bus *bus = dev->bus;
+ struct ssb_device *mdev = bus->mipscore.dev;
u32 irqflag;
u32 ipsflag;
u32 tmp;
unsigned int irq;
irqflag = ssb_irqflag(dev);
+ if (irqflag == 0x3f)
+ return 6;
ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG);
for (irq = 1; irq <= 4; irq++) {
tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]);
if (tmp == irqflag)
break;
}
- if (irq == 5)
- irq = 0;
+ if (irq == 5) {
+ if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
+ irq = 0;
+ }
return irq;
}
@@ -97,25 +122,56 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
struct ssb_device *mdev = bus->mipscore.dev;
u32 irqflag = ssb_irqflag(dev);
+ BUG_ON(oldirq == 6);
+
dev->irq = irq + 2;
- ssb_dprintk(KERN_INFO PFX
- "set_irq: core 0x%04x, irq %d => %d\n",
- dev->id.coreid, oldirq, irq);
/* clear the old irq */
if (oldirq == 0)
ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)));
- else
+ else if (oldirq != 5)
clear_irq(bus, oldirq);
/* assign the new one */
if (irq == 0) {
ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC)));
} else {
+ u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
+ if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) {
+ u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq];
+ struct ssb_device *olddev = find_device(dev, oldipsflag);
+ if (olddev)
+ set_irq(olddev, 0);
+ }
irqflag <<= ipsflag_irq_shift[irq];
- irqflag |= (ssb_read32(mdev, SSB_IPSFLAG) & ~ipsflag_irq_mask[irq]);
+ irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
ssb_write32(mdev, SSB_IPSFLAG, irqflag);
}
+ ssb_dprintk(KERN_INFO PFX
+ "set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.coreid, oldirq+2, irq+2);
+}
+
+static void print_irq(struct ssb_device *dev, unsigned int irq)
+{
+ int i;
+ static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
+ ssb_dprintk(KERN_INFO PFX
+ "core 0x%04x, irq :", dev->id.coreid);
+ for (i = 0; i <= 6; i++) {
+ ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" ");
+ }
+ ssb_dprintk("\n");
+}
+
+static void dump_irq(struct ssb_bus *bus)
+{
+ int i;
+ for (i = 0; i < bus->nr_devices; i++) {
+ struct ssb_device *dev;
+ dev = &(bus->devices[i]);
+ print_irq(dev, ssb_mips_irq(dev));
+ }
}
static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
@@ -197,16 +253,23 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
/* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */
for (irq = 2, i = 0; i < bus->nr_devices; i++) {
+ int mips_irq;
dev = &(bus->devices[i]);
- dev->irq = ssb_mips_irq(dev) + 2;
+ mips_irq = ssb_mips_irq(dev);
+ if (mips_irq > 4)
+ dev->irq = 0;
+ else
+ dev->irq = mips_irq + 2;
+ if (dev->irq > 5)
+ continue;
switch (dev->id.coreid) {
case SSB_DEV_USB11_HOST:
/* shouldn't need a separate irq line for non-4710, most of them have a proper
* external usb controller on the pci */
if ((bus->chip_id == 0x4710) && (irq <= 4)) {
set_irq(dev, irq++);
- break;
}
+ break;
/* fallthrough */
case SSB_DEV_PCI:
case SSB_DEV_ETHERNET:
@@ -220,6 +283,8 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
}
}
}
+ ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
+ dump_irq(bus);
ssb_mips_serial_init(mcore);
ssb_mips_flash_detect(mcore);
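ssb_mips_irq() above works by comparing a core's backplane flag against the per-line fields of IPSFLAG, with 0x3f meaning the core has no interrupt support at all; a miss falls back to the shared line. A standalone model of that lookup (the mask/shift tables and flag values are placeholders rather than the real register layout, and the INTVEC "disabled vs. shared" distinction is left out):

#include <stdio.h>

static const unsigned int ipsflag_irq_mask[]  = { 0, 0x0000003f, 0x00003f00, 0x003f0000, 0x3f000000 };
static const unsigned int ipsflag_irq_shift[] = { 0, 0, 8, 16, 24 };

static unsigned int mips_irq(unsigned int irqflag, unsigned int ipsflag)
{
        unsigned int irq;

        if (irqflag == 0x3f)
                return 6;               /* core has no interrupt support */
        for (irq = 1; irq <= 4; irq++)
                if (((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]) == irqflag)
                        return irq;
        return 0;                       /* not on a dedicated line: shared IRQ */
}

int main(void)
{
        unsigned int ipsflag = (0x04 << 8) | 0x02;      /* flag 2 -> line 1, flag 4 -> line 2 */

        printf("flag 0x02 -> irq %u\n", mips_irq(0x02, ipsflag));
        printf("flag 0x04 -> irq %u\n", mips_irq(0x04, ipsflag));
        printf("flag 0x07 -> irq %u\n", mips_irq(0x07, ipsflag));
        printf("flag 0x3f -> irq %u\n", mips_irq(0x3f, ipsflag));
        return 0;
}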
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index fbfadbac67e8..100e7a5c5ea1 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -583,7 +583,7 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
ssb_printk(".");
err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
if (err) {
- ssb_printk("\n" KERN_NOTICE PFX
+ ssb_printk(KERN_NOTICE PFX
"Failed to write to SPROM.\n");
failed = 1;
break;
@@ -591,7 +591,7 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
}
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
if (err) {
- ssb_printk("\n" KERN_NOTICE PFX
+ ssb_printk(KERN_NOTICE PFX
"Could not disable SPROM write access.\n");
failed = 1;
}
@@ -678,7 +678,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
sprom->board_rev = tuple.TupleData[1];
break;
case SSB_PCMCIA_CIS_PA:
- GOTO_ERROR_ON(tuple.TupleDataLen != 9,
+ GOTO_ERROR_ON((tuple.TupleDataLen != 9) &&
+ (tuple.TupleDataLen != 10),
"pa tpl size");
sprom->pa0b0 = tuple.TupleData[1] |
((u16)tuple.TupleData[2] << 8);
@@ -718,7 +719,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1];
break;
case SSB_PCMCIA_CIS_BFLAGS:
- GOTO_ERROR_ON(tuple.TupleDataLen != 3,
+ GOTO_ERROR_ON((tuple.TupleDataLen != 3) &&
+ (tuple.TupleDataLen != 5),
"bfl tpl size");
sprom->boardflags_lo = tuple.TupleData[1] |
((u16)tuple.TupleData[2] << 8);
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index 3c839e37d37f..c0a583cc2227 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -12,7 +12,6 @@
obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
octeon-ethernet-objs := ethernet.o
-octeon-ethernet-objs += ethernet-common.o
octeon-ethernet-objs += ethernet-mdio.o
octeon-ethernet-objs += ethernet-mem.o
octeon-ethernet-objs += ethernet-proc.o
diff --git a/drivers/staging/octeon/ethernet-common.c b/drivers/staging/octeon/ethernet-common.c
deleted file mode 100644
index 3e6f5b8cc63d..000000000000
--- a/drivers/staging/octeon/ethernet-common.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/**********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-**********************************************************************/
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <net/dst.h>
-
-#include <asm/atomic.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
-#include "ethernet-tx.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-#include "octeon-ethernet.h"
-#include "ethernet-common.h"
-
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-fau.h"
-#include "cvmx-helper.h"
-
-#include "cvmx-gmxx-defs.h"
-
-/**
- * Get the low level ethernet statistics
- *
- * @dev: Device to get the statistics from
- * Returns Pointer to the statistics
- */
-static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
-{
- cvmx_pip_port_status_t rx_status;
- cvmx_pko_port_status_t tx_status;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (octeon_is_simulation()) {
- /* The simulator doesn't support statistics */
- memset(&rx_status, 0, sizeof(rx_status));
- memset(&tx_status, 0, sizeof(tx_status));
- } else {
- cvmx_pip_get_port_status(priv->port, 1, &rx_status);
- cvmx_pko_get_port_status(priv->port, 1, &tx_status);
- }
-
- priv->stats.rx_packets += rx_status.inb_packets;
- priv->stats.tx_packets += tx_status.packets;
- priv->stats.rx_bytes += rx_status.inb_octets;
- priv->stats.tx_bytes += tx_status.octets;
- priv->stats.multicast += rx_status.multicast_packets;
- priv->stats.rx_crc_errors += rx_status.inb_errors;
- priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
-
- /*
- * The drop counter must be incremented atomically
- * since the RX tasklet also increments it.
- */
-#ifdef CONFIG_64BIT
- atomic64_add(rx_status.dropped_packets,
- (atomic64_t *)&priv->stats.rx_dropped);
-#else
- atomic_add(rx_status.dropped_packets,
- (atomic_t *)&priv->stats.rx_dropped);
-#endif
- }
-
- return &priv->stats;
-}
-
-/**
- * Set the multicast list. Currently unimplemented.
- *
- * @dev: Device to work on
- */
-static void cvm_oct_common_set_multicast_list(struct net_device *dev)
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- union cvmx_gmxx_rxx_adr_ctl control;
- control.u64 = 0;
- control.s.bcst = 1; /* Allow broadcast MAC addresses */
-
- if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
- (dev->flags & IFF_PROMISC))
- /* Force accept multicast packets */
- control.s.mcst = 2;
- else
- /* Force reject multicat packets */
- control.s.mcst = 1;
-
- if (dev->flags & IFF_PROMISC)
- /*
- * Reject matches if promisc. Since CAM is
- * shut off, should accept everything.
- */
- control.s.cam_mode = 0;
- else
- /* Filter packets based on the CAM */
- control.s.cam_mode = 1;
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
- control.u64);
- if (dev->flags & IFF_PROMISC)
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 0);
- else
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 1);
-
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
-}
-
-/**
- * Set the hardware MAC address for a device
- *
- * @dev: Device to change the MAC address for
- * @addr: Address structure to change it too. MAC address is addr + 2.
- * Returns Zero on success
- */
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- memcpy(dev->dev_addr, addr + 2, 6);
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int i;
- uint8_t *ptr = addr;
- uint64_t mac = 0;
- for (i = 0; i < 6; i++)
- mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[2]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[3]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[4]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[5]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[6]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[7]);
- cvm_oct_common_set_multicast_list(dev);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
- return 0;
-}
-
-/**
- * Change the link MTU. Unimplemented
- *
- * @dev: Device to change
- * @new_mtu: The new MTU
- *
- * Returns Zero on success
- */
-static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- int vlan_bytes = 4;
-#else
- int vlan_bytes = 0;
-#endif
-
- /*
- * Limit the MTU to make sure the ethernet packets are between
- * 64 bytes and 65535 bytes.
- */
- if ((new_mtu + 14 + 4 + vlan_bytes < 64)
- || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
- pr_err("MTU must be between %d and %d.\n",
- 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
- return -EINVAL;
- }
- dev->mtu = new_mtu;
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- /* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + 14 + 4 + vlan_bytes;
-
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Signal errors on packets larger than the MTU */
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
- max_packet);
- } else {
- /*
- * Set the hardware to truncate packets larger
- * than the MTU and smaller the 64 bytes.
- */
- union cvmx_pip_frm_len_chkx frm_len_chk;
- frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = 64;
- frm_len_chk.s.maxlen = max_packet;
- cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
- frm_len_chk.u64);
- }
- /*
- * Set the hardware to truncate packets larger than
- * the MTU. The jabber register must be set to a
- * multiple of 8 bytes, so round up.
- */
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
- (max_packet + 7) & ~7u);
- }
- return 0;
-}
-
-/**
- * Per network device initialization
- *
- * @dev: Device to initialize
- * Returns Zero on success
- */
-int cvm_oct_common_init(struct net_device *dev)
-{
- static int count;
- char mac[8] = { 0x00, 0x00,
- octeon_bootinfo->mac_addr_base[0],
- octeon_bootinfo->mac_addr_base[1],
- octeon_bootinfo->mac_addr_base[2],
- octeon_bootinfo->mac_addr_base[3],
- octeon_bootinfo->mac_addr_base[4],
- octeon_bootinfo->mac_addr_base[5] + count
- };
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- /*
- * Force the interface to use the POW send if always_use_pow
- * was specified or it is in the pow send list.
- */
- if ((pow_send_group != -1)
- && (always_use_pow || strstr(pow_send_list, dev->name)))
- priv->queue = -1;
-
- if (priv->queue != -1) {
- dev->hard_start_xmit = cvm_oct_xmit;
- if (USE_HW_TCPUDP_CHECKSUM)
- dev->features |= NETIF_F_IP_CSUM;
- } else
- dev->hard_start_xmit = cvm_oct_xmit_pow;
- count++;
-
- dev->get_stats = cvm_oct_common_get_stats;
- dev->set_mac_address = cvm_oct_common_set_mac_address;
- dev->set_multicast_list = cvm_oct_common_set_multicast_list;
- dev->change_mtu = cvm_oct_common_change_mtu;
- dev->do_ioctl = cvm_oct_ioctl;
- /* We do our own locking, Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
- SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = cvm_oct_poll_controller;
-#endif
-
- cvm_oct_mdio_setup_device(dev);
- dev->set_mac_address(dev, mac);
- dev->change_mtu(dev, dev->mtu);
-
- /*
- * Zero out stats for port so we won't mistakenly show
- * counters from the bootloader.
- */
- memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats));
-
- return 0;
-}
-
-void cvm_oct_common_uninit(struct net_device *dev)
-{
- /* Currently nothing to do */
-}
diff --git a/drivers/staging/octeon/ethernet-common.h b/drivers/staging/octeon/ethernet-common.h
deleted file mode 100644
index 2bd9cd76a398..000000000000
--- a/drivers/staging/octeon/ethernet-common.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-*********************************************************************/
-
-int cvm_oct_common_init(struct net_device *dev);
-void cvm_oct_common_uninit(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 8f7374e7664c..f13131b03c33 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -117,6 +117,8 @@
/* Maximum number of packets to process per interrupt. */
#define MAX_RX_PACKETS 120
+/* Maximum number of SKBs to try to free per xmit packet. */
+#define MAX_SKB_TO_FREE 10
#define MAX_OUT_QUEUE_DEPTH 1000
#ifndef CONFIG_SMP
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 8579f1670d1e..8704133fe127 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -33,7 +33,6 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-helper.h"
@@ -265,7 +264,7 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
return return_status;
}
-static int cvm_oct_rgmii_open(struct net_device *dev)
+int cvm_oct_rgmii_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -286,7 +285,7 @@ static int cvm_oct_rgmii_open(struct net_device *dev)
return 0;
}
-static int cvm_oct_rgmii_stop(struct net_device *dev)
+int cvm_oct_rgmii_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -305,9 +304,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
int r;
cvm_oct_common_init(dev);
- dev->open = cvm_oct_rgmii_open;
- dev->stop = cvm_oct_rgmii_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
/*
* Due to GMX errata in CN3XXX series chips, it is necessary
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 58fa39c1d675..2b54996bd85d 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -34,13 +34,12 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "ethernet-common.h"
#include "cvmx-helper.h"
#include "cvmx-gmxx-defs.h"
-static int cvm_oct_sgmii_open(struct net_device *dev)
+int cvm_oct_sgmii_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -61,7 +60,7 @@ static int cvm_oct_sgmii_open(struct net_device *dev)
return 0;
}
-static int cvm_oct_sgmii_stop(struct net_device *dev)
+int cvm_oct_sgmii_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -113,9 +112,7 @@ int cvm_oct_sgmii_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
- dev->open = cvm_oct_sgmii_open;
- dev->stop = cvm_oct_sgmii_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
if (!octeon_is_simulation())
priv->poll = cvm_oct_sgmii_poll;
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index e0971bbe4ddc..66190b0cb68f 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -33,7 +33,6 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-spi.h"
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 77b7122c8fdb..81a851390f1b 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -47,6 +47,7 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-tx.h"
#include "ethernet-util.h"
#include "cvmx-wqe.h"
@@ -82,8 +83,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
uint64_t old_scratch2;
int dropped;
int qos;
+ int queue_it_up;
struct octeon_ethernet *priv = netdev_priv(dev);
- int32_t in_use;
+ int32_t skb_to_free;
+ int32_t undo;
int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
unsigned char *fpa_head;
@@ -120,15 +123,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
/*
- * Assume we're going to be able t osend this
- * packet. Fetch and increment the number of pending
- * packets for output.
+ * Fetch and increment the number of packets to be
+ * freed.
*/
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
FAU_NUM_PACKET_BUFFERS_TO_FREE,
0);
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- priv->fau + qos * 4, 1);
+ priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
}
/*
@@ -253,10 +256,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* The skbuff will be reused without ever being freed. We must
- * cleanup a bunch of Linux stuff.
+ * clean up a bunch of core things.
*/
- dst_release(skb->dst);
- skb->dst = NULL;
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
secpath_put(skb->sp);
skb->sp = NULL;
@@ -286,16 +289,30 @@ dont_put_skbuff_in_hw:
if (USE_ASYNC_IOBDMA) {
/* Get the number of skbuffs in use by the hardware */
CVMX_SYNCIOBDMA;
- in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
} else {
/* Get the number of skbuffs in use by the hardware */
- in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
buffers_to_free =
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
/*
+ * We try to claim MAX_SKB_TO_FREE buffers. If there were not
+ * that many available, we have to un-claim (undo) any that
+ * were in excess. If skb_to_free is positive we will free
+ * that many buffers.
+ */
+ undo = skb_to_free > 0 ?
+ MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+ MAX_SKB_TO_FREE : -skb_to_free;
+
+ /*
* If we're sending faster than the receive can free them then
* don't do the HW free.
*/
@@ -330,38 +347,31 @@ dont_put_skbuff_in_hw:
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
}
+ queue_it_up = 0;
if (unlikely(dropped)) {
dev_kfree_skb_any(skb);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
priv->stats.tx_dropped++;
} else {
if (USE_SKBUFFS_IN_HW) {
/* Put this packet on the queue to be freed later */
if (pko_command.s.dontfree)
- skb_queue_tail(&priv->tx_free_list[qos], skb);
- else {
+ queue_it_up = 1;
+ else
cvmx_fau_atomic_add32
(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
- }
} else {
/* Put this packet on the queue to be freed later */
- skb_queue_tail(&priv->tx_free_list[qos], skb);
+ queue_it_up = 1;
}
}
- /* Free skbuffs not in use by the hardware, possibly two at a time */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
+ if (queue_it_up) {
spin_lock(&priv->tx_free_list[qos].lock);
- /*
- * Check again now that we have the lock. It might
- * have changed.
- */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ __skb_queue_tail(&priv->tx_free_list[qos], skb);
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
spin_unlock(&priv->tx_free_list[qos].lock);
+ } else {
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
}
return 0;
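The undo/clamp expressions above are easy to misread, so here is a standalone arithmetic check with MAX_SKB_TO_FREE = 10. skb_to_free is simply the value returned by the fetch-and-add; the sketch only shows how much gets un-claimed and how many buffers would actually be freed (cvm_oct_free_tx_skbs() frees only positive counts), not the FAU semantics themselves:

#include <stdio.h>

#define MAX_SKB_TO_FREE 10

int main(void)
{
        int samples[] = { 3, -4, -25 };
        int i;

        for (i = 0; i < 3; i++) {
                int skb_to_free = samples[i];
                int undo = skb_to_free > 0 ?
                        MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
                int freed;

                /* driver: if (undo > 0) cvmx_fau_atomic_add32(fau, -undo); */
                freed = -skb_to_free > MAX_SKB_TO_FREE ?
                        MAX_SKB_TO_FREE : -skb_to_free;

                printf("counter %4d: undo %2d, free %2d\n",
                       skb_to_free, undo > 0 ? undo : 0, freed > 0 ? freed : 0);
        }
        return 0;
}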
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index 5106236fe981..c0bebf750bc0 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -30,3 +30,28 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
void cvm_oct_tx_shutdown(struct net_device *dev);
+
+/**
+ * Free dead transmit skbs.
+ *
+ * @priv: The driver data
+ * @skb_to_free: The number of SKBs to free (free none if negative).
+ * @qos: The queue to free from.
+ * @take_lock: If true, acquire the skb list lock.
+ */
+static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
+ int skb_to_free,
+ int qos, int take_lock)
+{
+ /* Free skbuffs not in use by the hardware. */
+ if (skb_to_free > 0) {
+ if (take_lock)
+ spin_lock(&priv->tx_free_list[qos].lock);
+ while (skb_to_free > 0) {
+ dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ skb_to_free--;
+ }
+ if (take_lock)
+ spin_unlock(&priv->tx_free_list[qos].lock);
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index f08eb32e04fc..0c2e7cc40f35 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -33,14 +33,13 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-helper.h"
#include "cvmx-gmxx-defs.h"
-static int cvm_oct_xaui_open(struct net_device *dev)
+int cvm_oct_xaui_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -60,7 +59,7 @@ static int cvm_oct_xaui_open(struct net_device *dev)
return 0;
}
-static int cvm_oct_xaui_stop(struct net_device *dev)
+int cvm_oct_xaui_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -112,9 +111,7 @@ int cvm_oct_xaui_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
- dev->open = cvm_oct_xaui_open;
- dev->stop = cvm_oct_xaui_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
if (!octeon_is_simulation())
priv->poll = cvm_oct_xaui_poll;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index e8ef9e0b791f..b8479517dce2 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -37,13 +37,14 @@
#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
+#include "ethernet-mdio.h"
#include "ethernet-util.h"
#include "ethernet-proc.h"
-#include "ethernet-common.h"
-#include "octeon-ethernet.h"
+
#include "cvmx-pip.h"
#include "cvmx-pko.h"
@@ -51,6 +52,7 @@
#include "cvmx-ipd.h"
#include "cvmx-helper.h"
+#include "cvmx-gmxx-defs.h"
#include "cvmx-smix-defs.h"
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
@@ -129,53 +131,55 @@ extern struct semaphore mdio_sem;
*/
static void cvm_do_timer(unsigned long arg)
{
+ int32_t skb_to_free, undo;
+ int queues_per_port;
+ int qos;
+ struct octeon_ethernet *priv;
static int port;
- if (port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (cvm_oct_device[port]) {
- int queues_per_port;
- int qos;
- struct octeon_ethernet *priv =
- netdev_priv(cvm_oct_device[port]);
- if (priv->poll) {
- /* skip polling if we don't get the lock */
- if (!down_trylock(&mdio_sem)) {
- priv->poll(cvm_oct_device[port]);
- up(&mdio_sem);
- }
- }
- queues_per_port = cvmx_pko_get_num_queues(port);
- /* Drain any pending packets in the free list */
- for (qos = 0; qos < queues_per_port; qos++) {
- if (skb_queue_len(&priv->tx_free_list[qos])) {
- spin_lock(&priv->tx_free_list[qos].
- lock);
- while (skb_queue_len
- (&priv->tx_free_list[qos]) >
- cvmx_fau_fetch_and_add32(priv->
- fau +
- qos * 4,
- 0))
- dev_kfree_skb(__skb_dequeue
- (&priv->
- tx_free_list
- [qos]));
- spin_unlock(&priv->tx_free_list[qos].
- lock);
- }
- }
- cvm_oct_device[port]->get_stats(cvm_oct_device[port]);
- }
- port++;
- /* Poll the next port in a 50th of a second.
- This spreads the polling of ports out a little bit */
- mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
- } else {
+ if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
+ /*
+ * All ports have been polled. Start the next
+ * iteration through the ports in one second.
+ */
port = 0;
- /* All ports have been polled. Start the next iteration through
- the ports in one second */
mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+ return;
+ }
+ if (!cvm_oct_device[port])
+ goto out;
+
+ priv = netdev_priv(cvm_oct_device[port]);
+ if (priv->poll) {
+ /* skip polling if we don't get the lock */
+ if (!down_trylock(&mdio_sem)) {
+ priv->poll(cvm_oct_device[port]);
+ up(&mdio_sem);
+ }
}
+
+ queues_per_port = cvmx_pko_get_num_queues(port);
+ /* Drain any pending packets in the free list */
+ for (qos = 0; qos < queues_per_port; qos++) {
+ if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
+ continue;
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
+ undo = skb_to_free > 0 ?
+ MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+ MAX_SKB_TO_FREE : -skb_to_free;
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
+ }
+ cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
+
+out:
+ port++;
+ /* Poll the next port in a 50th of a second.
+ This spreads the polling of ports out a little bit */
+ mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
}
/**
@@ -246,6 +250,362 @@ int cvm_oct_free_work(void *work_queue_entry)
EXPORT_SYMBOL(cvm_oct_free_work);
/**
+ * Get the low level ethernet statistics
+ *
+ * @dev: Device to get the statistics from
+ * Returns Pointer to the statistics
+ */
+static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+{
+ cvmx_pip_port_status_t rx_status;
+ cvmx_pko_port_status_t tx_status;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
+ if (octeon_is_simulation()) {
+ /* The simulator doesn't support statistics */
+ memset(&rx_status, 0, sizeof(rx_status));
+ memset(&tx_status, 0, sizeof(tx_status));
+ } else {
+ cvmx_pip_get_port_status(priv->port, 1, &rx_status);
+ cvmx_pko_get_port_status(priv->port, 1, &tx_status);
+ }
+
+ priv->stats.rx_packets += rx_status.inb_packets;
+ priv->stats.tx_packets += tx_status.packets;
+ priv->stats.rx_bytes += rx_status.inb_octets;
+ priv->stats.tx_bytes += tx_status.octets;
+ priv->stats.multicast += rx_status.multicast_packets;
+ priv->stats.rx_crc_errors += rx_status.inb_errors;
+ priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
+
+ /*
+ * The drop counter must be incremented atomically
+ * since the RX tasklet also increments it.
+ */
+#ifdef CONFIG_64BIT
+ atomic64_add(rx_status.dropped_packets,
+ (atomic64_t *)&priv->stats.rx_dropped);
+#else
+ atomic_add(rx_status.dropped_packets,
+ (atomic_t *)&priv->stats.rx_dropped);
+#endif
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * Change the link MTU. Unimplemented
+ *
+ * @dev: Device to change
+ * @new_mtu: The new MTU
+ *
+ * Returns Zero on success
+ */
+static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ int vlan_bytes = 4;
+#else
+ int vlan_bytes = 0;
+#endif
+
+ /*
+ * Limit the MTU to make sure the ethernet packets are between
+ * 64 bytes and 65535 bytes.
+ */
+ if ((new_mtu + 14 + 4 + vlan_bytes < 64)
+ || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
+ pr_err("MTU must be between %d and %d.\n",
+ 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ /* Add ethernet header and FCS, and VLAN if configured. */
+ int max_packet = new_mtu + 14 + 4 + vlan_bytes;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Signal errors on packets larger than the MTU */
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
+ max_packet);
+ } else {
+ /*
+ * Set the hardware to truncate packets larger
+ * than the MTU and smaller than 64 bytes.
+ */
+ union cvmx_pip_frm_len_chkx frm_len_chk;
+ frm_len_chk.u64 = 0;
+ frm_len_chk.s.minlen = 64;
+ frm_len_chk.s.maxlen = max_packet;
+ cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
+ frm_len_chk.u64);
+ }
+ /*
+ * Set the hardware to truncate packets larger than
+ * the MTU. The jabber register must be set to a
+ * multiple of 8 bytes, so round up.
+ */
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
+ (max_packet + 7) & ~7u);
+ }
+ return 0;
+}
+
+/**
+ * Set the multicast list. Currently unimplemented.
+ *
+ * @dev: Device to work on
+ */
+static void cvm_oct_common_set_multicast_list(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ union cvmx_gmxx_rxx_adr_ctl control;
+ control.u64 = 0;
+ control.s.bcst = 1; /* Allow broadcast MAC addresses */
+
+ if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
+ (dev->flags & IFF_PROMISC))
+ /* Force accept multicast packets */
+ control.s.mcst = 2;
+ else
+ /* Force reject multicast packets */
+ control.s.mcst = 1;
+
+ if (dev->flags & IFF_PROMISC)
+ /*
+ * Reject matches if promisc. Since CAM is
+ * shut off, should accept everything.
+ */
+ control.s.cam_mode = 0;
+ else
+ /* Filter packets based on the CAM */
+ control.s.cam_mode = 1;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
+ control.u64);
+ if (dev->flags & IFF_PROMISC)
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 0);
+ else
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 1);
+
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+}
+
+/**
+ * Set the hardware MAC address for a device
+ *
+ * @dev: Device to change the MAC address for
+ * @addr: Address structure to change it to. MAC address is addr + 2.
+ * Returns Zero on success
+ */
+static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ memcpy(dev->dev_addr, addr + 2, 6);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int i;
+ uint8_t *ptr = addr;
+ uint64_t mac = 0;
+ for (i = 0; i < 6; i++)
+ mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
+ ptr[2]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
+ ptr[3]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
+ ptr[4]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
+ ptr[5]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
+ ptr[6]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
+ ptr[7]);
+ cvm_oct_common_set_multicast_list(dev);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+ return 0;
+}
+
+/**
+ * Per network device initialization
+ *
+ * @dev: Device to initialize
+ * Returns Zero on success
+ */
+int cvm_oct_common_init(struct net_device *dev)
+{
+ static int count;
+ char mac[8] = { 0x00, 0x00,
+ octeon_bootinfo->mac_addr_base[0],
+ octeon_bootinfo->mac_addr_base[1],
+ octeon_bootinfo->mac_addr_base[2],
+ octeon_bootinfo->mac_addr_base[3],
+ octeon_bootinfo->mac_addr_base[4],
+ octeon_bootinfo->mac_addr_base[5] + count
+ };
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ /*
+ * Force the interface to use the POW send if always_use_pow
+ * was specified or it is in the pow send list.
+ */
+ if ((pow_send_group != -1)
+ && (always_use_pow || strstr(pow_send_list, dev->name)))
+ priv->queue = -1;
+
+ if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
+ dev->features |= NETIF_F_IP_CSUM;
+
+ count++;
+
+ /* We do our own locking, Linux doesn't need to */
+ dev->features |= NETIF_F_LLTX;
+ SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+
+ cvm_oct_mdio_setup_device(dev);
+ dev->netdev_ops->ndo_set_mac_address(dev, mac);
+ dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
+
+ /*
+ * Zero out stats for port so we won't mistakenly show
+ * counters from the bootloader.
+ */
+ memset(dev->netdev_ops->ndo_get_stats(dev), 0,
+ sizeof(struct net_device_stats));
+
+ return 0;
+}
+
+void cvm_oct_common_uninit(struct net_device *dev)
+{
+ /* Currently nothing to do */
+}
+
+static const struct net_device_ops cvm_oct_npi_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
+ .ndo_init = cvm_oct_xaui_init,
+ .ndo_uninit = cvm_oct_xaui_uninit,
+ .ndo_open = cvm_oct_xaui_open,
+ .ndo_stop = cvm_oct_xaui_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
+ .ndo_init = cvm_oct_sgmii_init,
+ .ndo_uninit = cvm_oct_sgmii_uninit,
+ .ndo_open = cvm_oct_sgmii_open,
+ .ndo_stop = cvm_oct_sgmii_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_spi_netdev_ops = {
+ .ndo_init = cvm_oct_spi_init,
+ .ndo_uninit = cvm_oct_spi_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
+ .ndo_init = cvm_oct_rgmii_init,
+ .ndo_uninit = cvm_oct_rgmii_uninit,
+ .ndo_open = cvm_oct_rgmii_open,
+ .ndo_stop = cvm_oct_rgmii_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_pow_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_start_xmit = cvm_oct_xmit_pow,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+/**
* Module/ driver initialization. Creates the linux network
* devices.
*
@@ -303,7 +663,7 @@ static int __init cvm_oct_init_module(void)
struct octeon_ethernet *priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct octeon_ethernet));
- dev->init = cvm_oct_common_init;
+ dev->netdev_ops = &cvm_oct_pow_netdev_ops;
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
priv->queue = -1;
@@ -372,44 +732,38 @@ static int __init cvm_oct_init_module(void)
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
- dev->init = cvm_oct_common_init;
- dev->uninit = cvm_oct_common_uninit;
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
strcpy(dev->name, "npi%d");
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
- dev->init = cvm_oct_xaui_init;
- dev->uninit = cvm_oct_xaui_uninit;
+ dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
strcpy(dev->name, "xaui%d");
break;
case CVMX_HELPER_INTERFACE_MODE_LOOP:
- dev->init = cvm_oct_common_init;
- dev->uninit = cvm_oct_common_uninit;
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
strcpy(dev->name, "loop%d");
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
- dev->init = cvm_oct_sgmii_init;
- dev->uninit = cvm_oct_sgmii_uninit;
+ dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
- dev->init = cvm_oct_spi_init;
- dev->uninit = cvm_oct_spi_uninit;
+ dev->netdev_ops = &cvm_oct_spi_netdev_ops;
strcpy(dev->name, "spi%d");
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
- dev->init = cvm_oct_rgmii_init;
- dev->uninit = cvm_oct_rgmii_uninit;
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
}
- if (!dev->init) {
+ if (!dev->netdev_ops) {
kfree(dev);
} else if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device "
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index b3199076ef5e..3aef9878fc0a 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -111,12 +111,23 @@ static inline int cvm_oct_transmit(struct net_device *dev,
extern int cvm_oct_rgmii_init(struct net_device *dev);
extern void cvm_oct_rgmii_uninit(struct net_device *dev);
+extern int cvm_oct_rgmii_open(struct net_device *dev);
+extern int cvm_oct_rgmii_stop(struct net_device *dev);
+
extern int cvm_oct_sgmii_init(struct net_device *dev);
extern void cvm_oct_sgmii_uninit(struct net_device *dev);
+extern int cvm_oct_sgmii_open(struct net_device *dev);
+extern int cvm_oct_sgmii_stop(struct net_device *dev);
+
extern int cvm_oct_spi_init(struct net_device *dev);
extern void cvm_oct_spi_uninit(struct net_device *dev);
extern int cvm_oct_xaui_init(struct net_device *dev);
extern void cvm_oct_xaui_uninit(struct net_device *dev);
+extern int cvm_oct_xaui_open(struct net_device *dev);
+extern int cvm_oct_xaui_stop(struct net_device *dev);
+
+extern int cvm_oct_common_init(struct net_device *dev);
+extern void cvm_oct_common_uninit(struct net_device *dev);
extern int always_use_pow;
extern int pow_send_group;
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 581232b719fd..90b29b564631 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -284,21 +284,12 @@ static void ProcessModemStatus(struct quatech_port *qt_port,
return;
}
-static void ProcessRxChar(struct usb_serial_port *port, unsigned char Data)
+static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
+ unsigned char data)
{
- struct tty_struct *tty;
struct urb *urb = port->read_urb;
- tty = tty_port_tty_get(&port->port);
-
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
-
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, &Data, 1);
- /* tty_flip_buffer_push(tty); */
- }
-
- return;
+ if (urb->actual_length)
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
@@ -435,8 +426,10 @@ static void qt_read_bulk_callback(struct urb *urb)
case 0xff:
dbg("No status sequence. \n");
- ProcessRxChar(port, data[i]);
- ProcessRxChar(port, data[i + 1]);
+ if (tty) {
+ ProcessRxChar(tty, port, data[i]);
+ ProcessRxChar(tty, port, data[i + 1]);
+ }
i += 2;
break;
}
@@ -444,10 +437,8 @@ static void qt_read_bulk_callback(struct urb *urb)
continue;
}
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, (data + i), 1);
- }
+ if (tty && urb->actual_length)
+ tty_insert_flip_char(tty, data[i], TTY_NORMAL);
}
tty_flip_buffer_push(tty);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 38bfdb0f6660..3f1045993474 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -550,7 +550,7 @@ static void acm_waker(struct work_struct *waker)
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
struct acm *acm;
- int rv = -EINVAL;
+ int rv = -ENODEV;
int i;
dbg("Entering acm_tty_open.");
@@ -677,7 +677,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
/* Perform the closing process and see if we need to do the hardware
shutdown */
- if (tty_port_close_start(&acm->port, tty, filp) == 0)
+ if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0)
return;
acm_port_down(acm, 0);
tty_port_close_end(&acm->port, tty);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ce3f453f02ef..95ccfa0b9fc5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -648,7 +648,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
struct urb *urb;
int length;
unsigned long flags;
- char buffer[4]; /* Any root hubs with > 31 ports? */
+ char buffer[6]; /* Any root hubs with > 31 ports? */
if (unlikely(!hcd->rh_registered))
return;
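The bump from 4 to 6 bytes follows from how the root-hub status bitmap is sized: one change bit per port plus bit 0 for the hub itself, rounded up to whole bytes, so 4 bytes only cover 31 ports. A quick standalone check of that arithmetic:

#include <stdio.h>

static unsigned int status_bytes(unsigned int ports)
{
        return (ports + 1 + 7) / 8;     /* DIV_ROUND_UP(ports + 1, 8) */
}

int main(void)
{
        printf("31 ports -> %u bytes\n", status_bytes(31));     /* 4 */
        printf("47 ports -> %u bytes\n", status_bytes(47));     /* 6 */
        return 0;
}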
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 1576a0520adf..0c03471f0d41 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -337,10 +337,10 @@ config USB_R8A66597_HCD
config SUPERH_ON_CHIP_R8A66597
boolean "Enable SuperH on-chip R8A66597 USB"
- depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723)
+ depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724)
help
This driver enables support for the on-chip R8A66597 in the
- SH7366 and SH7723 processors.
+ SH7366, SH7723 and SH7724 processors.
config USB_WHCI_HCD
tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f8d9045d668a..0f7a30b7d2d1 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1261,7 +1261,7 @@ static int mon_alloc_buff(struct mon_pgmap *map, int npages)
return -ENOMEM;
}
map[n].ptr = (unsigned char *) vaddr;
- map[n].pg = virt_to_page(vaddr);
+ map[n].pg = virt_to_page((void *) vaddr);
}
return 0;
}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d595aa5586a7..a84216464ca0 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -333,6 +333,9 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
{
struct usb_serial_port *port = tty->driver_data;
+ if (!port)
+ return;
+
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 559f8784acf3..9052bcb4f528 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -501,7 +501,7 @@ int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
int result = -ENOANO;
struct uwb_rceb *rceb = *header;
int event = le16_to_cpu(rceb->wEvent);
- size_t event_size;
+ ssize_t event_size;
size_t core_size, offset;
if (rceb->bEventType != UWB_RC_CET_GENERAL)
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c
index cd2035768b47..86a853b84119 100644
--- a/drivers/uwb/wlp/txrx.c
+++ b/drivers/uwb/wlp/txrx.c
@@ -326,7 +326,7 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp,
int result = -EINVAL;
struct ethhdr *eth_hdr = (void *) skb->data;
- if (is_broadcast_ether_addr(eth_hdr->h_dest)) {
+ if (is_multicast_ether_addr(eth_hdr->h_dest)) {
result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
if (result < 0) {
if (printk_ratelimit())
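is_multicast_ether_addr() also matches the broadcast address, because ff:ff:ff:ff:ff:ff is just the all-ones case of a group address (I/G bit, bit 0 of the first octet, set), so the change above widens the copy-to-all path from broadcast-only to any group-addressed frame. A standalone illustration with simplified helpers, not the kernel ones:

#include <stdio.h>

static int is_multicast(const unsigned char *a)
{
        return a[0] & 0x01;     /* I/G bit set: group address */
}

static int is_broadcast(const unsigned char *a)
{
        int i;

        for (i = 0; i < 6; i++)
                if (a[i] != 0xff)
                        return 0;
        return 1;
}

int main(void)
{
        unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* IPv4 multicast */

        printf("bcast: multicast=%d broadcast=%d\n", is_multicast(bcast), is_broadcast(bcast));
        printf("mcast: multicast=%d broadcast=%d\n", is_multicast(mcast), is_broadcast(mcast));
        return 0;
}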
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d6d65ef85f54..8afcf08eba98 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -616,6 +616,8 @@ config FB_STI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select STI_CONSOLE
+ select VT
default y
---help---
STI refers to the HP "Standard Text Interface" which is a set of
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index fb8163d181ab..a21efcd10b78 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -226,9 +226,10 @@ static int clcdfb_set_par(struct fb_info *info)
clcdfb_enable(fb, regs.cntl);
#ifdef DEBUG
- printk(KERN_INFO "CLCD: Registers set to\n"
- KERN_INFO " %08x %08x %08x %08x\n"
- KERN_INFO " %08x %08x %08x %08x\n",
+ printk(KERN_INFO
+ "CLCD: Registers set to\n"
+ " %08x %08x %08x %08x\n"
+ " %08x %08x %08x %08x\n",
readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1),
readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3),
readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS),
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 018850c116c6..8cd279be74e5 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2405,6 +2405,9 @@ static int do_fb_set_var(struct fb_var_screeninfo *var, int isactive)
return 0;
}
+/* fbhw->encode_fix() must be called with fb_info->mm_lock held
+ * if it is called after register_framebuffer() - not the case here
+ */
static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
{
struct atafb_par par;
@@ -2414,7 +2417,8 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
if (err)
return err;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- return fbhw->encode_fix(fix, &par);
+ err = fbhw->encode_fix(fix, &par);
+ return err;
}
static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2743,7 +2747,9 @@ static int atafb_set_par(struct fb_info *info)
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
+ mutex_lock(&info->mm_lock);
fbhw->encode_fix(&info->fix, par);
+ mutex_unlock(&info->mm_lock);
/* Set new videomode */
ata_set_par(par);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5afd64482f55..da05f0801bb7 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -261,6 +261,9 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
/**
* atmel_lcdfb_alloc_video_memory - Allocate framebuffer memory
* @sinfo: the frame buffer to allocate memory for
+ *
+ * This function is called only from the atmel_lcdfb_probe()
+ * so no locking by fb_info->mm_lock around smem_len setting is needed.
*/
static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
{
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 7691e73823d3..1f39a62f899b 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -187,6 +187,8 @@ struct atyfb_par {
int mtrr_reg;
#endif
u32 mem_cntl;
+ struct crtc saved_crtc;
+ union aty_pll saved_pll;
};
/*
@@ -217,6 +219,7 @@ struct atyfb_par {
#define M64F_XL_DLL 0x00080000
#define M64F_MFB_FORCE_4 0x00100000
#define M64F_HW_TRIPLE 0x00200000
+#define M64F_XL_MEM 0x00400000
/*
* Register access
*/
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1207c208a30b..63d3739d43a8 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,8 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>
+#include <linux/reboot.h>
+#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info);
static int store_video_par(char *videopar, unsigned char m64_num);
#endif
-static struct crtc saved_crtc;
-static union aty_pll saved_pll;
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
static int read_aty_sense(const struct atyfb_par *par);
#endif
+static DEFINE_MUTEX(reboot_lock);
+static struct fb_info *reboot_info;
/*
* Interface used by the world
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
-#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4)
-#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS)
+#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
+#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
static struct {
u16 pci_id;
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
+static char ram_wram[] __devinitdata = "WRAM";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = {
#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
+ ram_sdram, ram_sgram, ram_wram, ram_resv
+};
+static char *aty_xl_ram[8] __devinitdata = {
+ ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
+static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
+{
+ u32 line_length = vxres * bpp / 8;
+
+ if (par->ram_type == SGRAM ||
+ (!M64_HAS(XL_MEM) && par->ram_type == WRAM))
+ line_length = (line_length + 63) & ~63;
+
+ return line_length;
+}
+
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var, struct crtc *crtc)
{
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
+ u32 line_length;
/* input */
- xres = var->xres;
+ xres = (var->xres + 7) & ~7;
yres = var->yres;
- vxres = var->xres_virtual;
+ vxres = (var->xres_virtual + 7) & ~7;
vyres = var->yres_virtual;
- xoffset = var->xoffset;
+ xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
bpp = var->bits_per_pixel;
if (bpp == 16)
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
} else
FAIL("invalid bpp");
- if (vxres * vyres * bpp / 8 > info->fix.smem_len)
+ line_length = calc_line_length(par, vxres, bpp);
+
+ if (vyres * line_length > info->fix.smem_len)
FAIL("not enough video RAM");
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->xoffset = xoffset;
crtc->yoffset = yoffset;
crtc->bpp = bpp;
- crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19);
+ crtc->off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
crtc->vline_crnt_vline = 0;
crtc->h_tot_disp = h_total | (h_disp<<16);
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info)
}
aty_st_8(DAC_MASK, 0xff, par);
- info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8;
+ info->fix.line_length = calc_line_length(par, var->xres_virtual,
+ var->bits_per_pixel);
+
info->fix.visual = var->bits_per_pixel <= 8 ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
{
u32 xoffset = info->var.xoffset;
u32 yoffset = info->var.yoffset;
- u32 vxres = par->crtc.vxres;
+ u32 line_length = info->fix.line_length;
u32 bpp = info->var.bits_per_pixel;
- par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19);
+ par->crtc.off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
}
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
const int *refresh_tbl;
int i, size;
- if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) {
+ if (M64_HAS(XL_MEM)) {
refresh_tbl = ragexl_tbl;
size = ARRAY_SIZE(ragexl_tbl);
} else {
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info)
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
- ramname = aty_ct_ram[par->ram_type];
+ if (M64_HAS(XL_MEM))
+ ramname = aty_xl_ram[par->ram_type];
+ else
+ ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
par->pll_limits.mclk = 63;
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info)
#endif /* CONFIG_FB_ATY_CT */
/* save previous video mode */
- aty_get_crtc(par, &saved_crtc);
+ aty_get_crtc(par, &par->saved_crtc);
if(par->pll_ops->get_pll)
- par->pll_ops->get_pll(info, &saved_pll);
+ par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info)
aty_init_exit:
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */
+ mutex_lock(&reboot_lock);
+ if (!reboot_info)
+ reboot_info = info;
+ mutex_unlock(&reboot_lock);
+
return 0;
err_release_io:
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info)
struct atyfb_par *par = (struct atyfb_par *) info->par;
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
unregister_framebuffer(info);
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
+ mutex_lock(&reboot_lock);
+ if (reboot_info == info)
+ reboot_info = NULL;
+ mutex_unlock(&reboot_lock);
+
atyfb_remove(info);
}
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options)
}
#endif /* MODULE */
+static int atyfb_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct atyfb_par *par;
+
+ if (code != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ mutex_lock(&reboot_lock);
+
+ if (!reboot_info)
+ goto out;
+
+ if (!lock_fb_info(reboot_info))
+ goto out;
+
+ par = reboot_info->par;
+
+ /*
+ * HP OmniBook 500's BIOS doesn't like the state of the
+ * hardware after atyfb has been used. Restore the hardware
+ * to the original state to allow successful reboots.
+ */
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(reboot_info, &par->saved_pll);
+
+ unlock_fb_info(reboot_info);
+ out:
+ mutex_unlock(&reboot_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block atyfb_reboot_notifier = {
+ .notifier_call = atyfb_reboot_notify,
+};
+
+static const struct dmi_system_id atyfb_reboot_ids[] = {
+ {
+ .ident = "HP OmniBook 500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
+ },
+ },
+
+ { }
+};
+
static int __init atyfb_init(void)
{
int err1 = 1, err2 = 1;
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void)
err2 = atyfb_atari_probe();
#endif
- return (err1 && err2) ? -ENODEV : 0;
+ if (err1 && err2)
+ return -ENODEV;
+
+ if (dmi_check_system(atyfb_reboot_ids))
+ register_reboot_notifier(&atyfb_reboot_notifier);
+
+ return 0;
}
static void __exit atyfb_exit(void)
{
+ if (dmi_check_system(atyfb_reboot_ids))
+ unregister_reboot_notifier(&atyfb_reboot_notifier);
+
#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
#endif
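
A worked example (editorial addition, values chosen only for illustration) of the calc_line_length() helper introduced above, showing why the video-RAM check now multiplies vyres by the line length instead of using vxres * vyres * bpp / 8:

	/* vxres = 800, bpp = 24  ->  800 * 24 / 8 = 2400 bytes per line.
	 * On SGRAM (or on WRAM without M64F_XL_MEM) the pitch is rounded up
	 * to a 64-byte multiple: (2400 + 63) & ~63 = 2432 bytes, so an
	 * 800x600x24 mode needs 600 * 2432 = 1459200 bytes of video RAM,
	 * slightly more than the unrounded 800 * 600 * 3 = 1440000.
	 */
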
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 0cc9724e61a2..51fcc0a2c94a 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par)
void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
{
u32 pitch_value;
+ u32 vxres;
/* determine modal information from global mode structure */
- pitch_value = info->var.xres_virtual;
+ pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8);
+ vxres = info->var.xres_virtual;
if (info->var.bits_per_pixel == 24) {
/* In 24 bpp, the engine is in 8 bpp - this requires that all */
/* horizontal coordinates and widths must be adjusted */
pitch_value *= 3;
+ vxres *= 3;
}
/* On GTC (RagePro), we need to reset the 3D engine before */
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
aty_st_le32(SC_LEFT, 0, par);
aty_st_le32(SC_TOP, 0, par);
aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par);
- aty_st_le32(SC_RIGHT, pitch_value - 1, par);
+ aty_st_le32(SC_RIGHT, vxres - 1, par);
/* set background color to minimum value (usually BLACK) */
aty_st_le32(DP_BKGD_CLR, 0, par);
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 1dae7f8f3c6b..51422fc4f606 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -356,7 +356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL));
+ lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
if (lcd->buf == NULL) {
kfree(lcd);
return -ENOMEM;
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index 7bad24ed04ef..108b89e09a80 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,7 @@
/*
* Cobalt server LCD frame buffer driver.
*
- * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index f8a09bf8d0cd..53ea05645ff8 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1310,8 +1310,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
-__acquires(&info->lock)
-__releases(&info->lock)
{
int fbidx = iminor(file->f_path.dentry->d_inode);
struct fb_info *info = registered_fb[fbidx];
@@ -1325,16 +1323,14 @@ __releases(&info->lock)
off = vma->vm_pgoff << PAGE_SHIFT;
if (!fb)
return -ENODEV;
+ mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
- mutex_lock(&info->lock);
res = fb->fb_mmap(info, vma);
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return res;
}
- mutex_lock(&info->lock);
-
/* frame buffer memory */
start = info->fix.smem_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
@@ -1342,13 +1338,13 @@ __releases(&info->lock)
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
@@ -1518,6 +1514,7 @@ register_framebuffer(struct fb_info *fb_info)
break;
fb_info->node = i;
mutex_init(&fb_info->lock);
+ mutex_init(&fb_info->mm_lock);
fb_info->dev = device_create(fb_class, fb_info->device,
MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
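
A minimal sketch (editorial addition, not part of this series) of the locking convention the fbmem.c hunks above establish: once a framebuffer is registered, fb_mmap() samples smem_start/smem_len under info->mm_lock, so any driver path that later moves or resizes the video memory should update those fields under the same mutex. The helper name and the new_start/new_len arguments below are hypothetical.

#include <linux/fb.h>

/* hypothetical helper; new_start/new_len exist only for illustration */
static void example_fb_update_mem(struct fb_info *info,
				  unsigned long new_start, u32 new_len)
{
	/* fb_mmap() reads these fields under mm_lock (see the hunks above) */
	mutex_lock(&info->mm_lock);
	info->fix.smem_start = new_start;
	info->fix.smem_len = new_len;
	mutex_unlock(&info->mm_lock);
}
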
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index f153c581cbd7..72d68b3dc478 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info)
static int map_video_memory(struct fb_info *info)
{
phys_addr_t phys;
+ u32 smem_len = info->fix.line_length * info->var.yres_virtual;
pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+ pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
- info->fix.smem_len = info->fix.line_length * info->var.yres_virtual;
- pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
- info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
+ info->screen_base = fsl_diu_alloc(smem_len, &phys);
if (info->screen_base == NULL) {
printk(KERN_ERR "Unable to allocate fb memory\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (unsigned long) phys;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
- info->fix.smem_start,
- info->fix.smem_len);
+ info->fix.smem_start, info->fix.smem_len);
pr_debug("screen base %p\n", info->screen_base);
return 0;
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info)
static void unmap_video_memory(struct fb_info *info)
{
fsl_diu_free(info->screen_base, info->fix.smem_len);
+ mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
+ mutex_unlock(&info->mm_lock);
}
/*
@@ -1219,12 +1223,6 @@ static int __devinit install_fb(struct fb_info *info)
return -EINVAL;
}
- if (fsl_diu_set_par(info)) {
- printk(KERN_ERR "fb_set_par failed");
- fb_dealloc_cmap(&info->cmap);
- return -EINVAL;
- }
-
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "register_framebuffer failed");
unmap_video_memory(info);
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index 020db7fc9153..e7116a6d82d3 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -44,9 +44,6 @@ static struct fb_fix_screeninfo hitfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static u32 pseudo_palette[16];
-static struct fb_info fb_info;
-
static inline void hitfb_accel_wait(void)
{
while (fb_readw(HD64461_GRCFGR) & HD64461_GRCFGR_ACCSTATUS) ;
@@ -331,6 +328,8 @@ static struct fb_ops hitfb_ops = {
static int __init hitfb_probe(struct platform_device *dev)
{
unsigned short lcdclor, ldr3, ldvndr;
+ struct fb_info *info;
+ int ret;
if (fb_get_options("hitfb", NULL))
return -ENODEV;
@@ -384,32 +383,53 @@ static int __init hitfb_probe(struct platform_device *dev)
break;
}
- fb_info.fbops = &hitfb_ops;
- fb_info.var = hitfb_var;
- fb_info.fix = hitfb_fix;
- fb_info.pseudo_palette = pseudo_palette;
- fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+ info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
+ if (unlikely(!info))
+ return -ENOMEM;
+
+ info->fbops = &hitfb_ops;
+ info->var = hitfb_var;
+ info->fix = hitfb_fix;
+ info->pseudo_palette = info->par;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
- fb_info.screen_base = (void *)hitfb_fix.smem_start;
+ info->screen_base = (void *)hitfb_fix.smem_start;
- fb_alloc_cmap(&fb_info.cmap, 256, 0);
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (unlikely(ret < 0))
+ goto err_fb;
- if (register_framebuffer(&fb_info) < 0)
- return -EINVAL;
+ ret = register_framebuffer(info);
+ if (unlikely(ret < 0))
+ goto err;
+
+ platform_set_drvdata(dev, info);
printk(KERN_INFO "fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ info->node, info->fix.id);
+
return 0;
+
+err:
+ fb_dealloc_cmap(&info->cmap);
+err_fb:
+ framebuffer_release(info);
+ return ret;
}
static int __exit hitfb_remove(struct platform_device *dev)
{
- return unregister_framebuffer(&fb_info);
+ struct fb_info *info = platform_get_drvdata(dev);
+
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+
+ return 0;
}
-#ifdef CONFIG_PM
-static int hitfb_suspend(struct platform_device *dev, pm_message_t state)
+static int hitfb_suspend(struct device *dev)
{
u16 v;
@@ -421,7 +441,7 @@ static int hitfb_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
-static int hitfb_resume(struct platform_device *dev)
+static int hitfb_resume(struct device *dev)
{
u16 v;
@@ -435,17 +455,19 @@ static int hitfb_resume(struct platform_device *dev)
return 0;
}
-#endif
+
+static struct dev_pm_ops hitfb_dev_pm_ops = {
+ .suspend = hitfb_suspend,
+ .resume = hitfb_resume,
+};
static struct platform_driver hitfb_driver = {
.probe = hitfb_probe,
.remove = __exit_p(hitfb_remove),
-#ifdef CONFIG_PM
- .suspend = hitfb_suspend,
- .resume = hitfb_resume,
-#endif
.driver = {
.name = "hitfb",
+ .owner = THIS_MODULE,
+ .pm = &hitfb_dev_pm_ops,
},
};
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 2e940199fc89..5743ea25e818 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strcpy(fix->id, "I810");
+ mutex_lock(&info->mm_lock);
fix->smem_start = par->fb.physical;
fix->smem_len = par->fb.size;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->xpanstep = 8;
@@ -2058,8 +2060,7 @@ static int __devinit i810fb_init_pci (struct pci_dev *dev,
fb_var_to_videomode(&mode, &info->var);
fb_add_videomode(&mode, &info->modelist);
- encode_fix(&info->fix, info);
-
+
i810fb_init_ringbuffer(info);
err = register_framebuffer(info);
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index 0ce3b0a89798..a74e5da17aa0 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -454,9 +454,9 @@ static void DAC1064_restore_2(WPMINFO2) {
dprintk(KERN_DEBUG "DAC1064regs ");
for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) {
dprintk("R%02X=%02X ", MGA1064_DAC_regs[i], ACCESS_FBINFO(hw).DACreg[i]);
- if ((i & 0x7) == 0x7) dprintk("\n" KERN_DEBUG "continuing... ");
+ if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... ");
}
- dprintk("\n" KERN_DEBUG "DAC1064clk ");
+ dprintk(KERN_DEBUG "DAC1064clk ");
for (i = 0; i < 6; i++)
dprintk("C%02X=%02X ", i, ACCESS_FBINFO(hw).DACclk[i]);
dprintk("\n");
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index 13524821e242..4e825112a601 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -651,9 +651,9 @@ static void Ti3026_restore(WPMINFO2) {
dprintk(KERN_DEBUG "3026DACregs ");
for (i = 0; i < 21; i++) {
dprintk("R%02X=%02X ", DACseq[i], hw->DACreg[i]);
- if ((i & 0x7) == 0x7) dprintk("\n" KERN_DEBUG "continuing... ");
+ if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... ");
}
- dprintk("\n" KERN_DEBUG "DACclk ");
+ dprintk(KERN_DEBUG "DACclk ");
for (i = 0; i < 6; i++)
dprintk("C%02X=%02X ", i, hw->DACclk[i]);
dprintk("\n");
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 8e7a275df50c..0c1049b308bf 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2)
struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
DBG(__func__)
+ mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
+ mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
}
static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -1874,7 +1876,6 @@ static int initMatrox2(WPMINFO struct board* b){
}
matroxfb_init_fix(PMINFO2);
ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase));
- matroxfb_update_fix(PMINFO2);
/* Normalize values (namely yres_virtual) */
matroxfb_check_var(&vesafb_defined, &ACCESS_FBINFO(fbcon));
/* And put it into "current" var. Do NOT program hardware yet, or we'll not take over
@@ -2081,6 +2082,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
spin_lock_init(&ACCESS_FBINFO(lock.accel));
init_rwsem(&ACCESS_FBINFO(crtc2.lock));
init_rwsem(&ACCESS_FBINFO(altout.lock));
+ mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
ACCESS_FBINFO(irq_flags) = 0;
init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 7ac4c5f6145d..ebcb5c6b4962 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -289,7 +289,12 @@ static int matroxfb_dh_release(struct fb_info* info, int user) {
#undef m2info
}
-static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) {
+/*
+ * This function is called before register_framebuffer(), so
+ * no locking is needed.
+ */
+static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info)
+{
struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;
strcpy(fix->id, "MATROX DH");
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index b7af5256e887..f8778cde2183 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -669,7 +669,7 @@ static uint32_t bpp_to_pixfmt(int bpp)
}
static int mx3fb_blank(int blank, struct fb_info *fbi);
-static int mx3fb_map_video_memory(struct fb_info *fbi);
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len);
static int mx3fb_unmap_video_memory(struct fb_info *fbi);
/**
@@ -742,8 +742,7 @@ static int mx3fb_set_par(struct fb_info *fbi)
if (fbi->fix.smem_start)
mx3fb_unmap_video_memory(fbi);
- fbi->fix.smem_len = mem_len;
- if (mx3fb_map_video_memory(fbi) < 0) {
+ if (mx3fb_map_video_memory(fbi, mem_len) < 0) {
mutex_unlock(&mx3_fbi->mutex);
return -ENOMEM;
}
@@ -1198,6 +1197,7 @@ static int mx3fb_resume(struct platform_device *pdev)
/**
* mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
* @fbi: framebuffer information pointer
+ * @mem_len: length of mapped memory
* @return: Error code indicating success or failure
*
* This buffer is remapped into a non-cached, non-buffered, memory region to
@@ -1205,23 +1205,26 @@ static int mx3fb_resume(struct platform_device *pdev)
* area is remapped, all virtual memory access to the video memory should occur
* at the new region.
*/
-static int mx3fb_map_video_memory(struct fb_info *fbi)
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
{
int retval = 0;
dma_addr_t addr;
fbi->screen_base = dma_alloc_writecombine(fbi->device,
- fbi->fix.smem_len,
+ mem_len,
&addr, GFP_DMA);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
- fbi->fix.smem_len);
+ mem_len);
retval = -EBUSY;
goto err0;
}
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = addr;
+ fbi->fix.smem_len = mem_len;
+ mutex_unlock(&fbi->mm_lock);
dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
(uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
@@ -1251,8 +1254,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
fbi->screen_base, fbi->fix.smem_start);
fbi->screen_base = 0;
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
return 0;
}
@@ -1360,11 +1365,6 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
init_completion(&mx3fbi->flip_cmpl);
disable_irq(ichan->eof_irq);
dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
- ret = mx3fb_set_par(fbi);
- if (ret < 0)
- goto esetpar;
-
- mx3fb_blank(FB_BLANK_UNBLANK, fbi);
dev_info(dev, "registered, using mode %s\n", fb_mode);
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 135ae18bfce8..eef2bb298d9f 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -543,8 +543,7 @@ int NVCommonSetup(struct fb_info *info)
} else if (analog_on_B) {
CRTCnumber = outputBfromCRTC;
FlatPanel = 0;
- printk("nvidiafb: CRTC %i"
- "appears to have a "
+ printk("nvidiafb: CRTC %i appears to have a "
"CRT attached\n", CRTCnumber);
} else if (slaved_on_A) {
CRTCnumber = 0;
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 060d72fe57cb..4ea99bfc37b4 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi)
rg = &plane->fbdev->mem_desc.region[plane->idx];
fbi->screen_base = rg->vaddr;
+ mutex_lock(&fbi->mm_lock);
fix->smem_start = rg->paddr;
fix->smem_len = rg->size;
+ mutex_unlock(&fbi->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
bpp = var->bits_per_pixel;
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
* plane memory is dealloce'd, the other
* screen parameters in var / fix are invalid.
*/
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
}
}
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 03b3670130a0..bacfabd9ce16 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info)
offset = 0x10;
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
+ mutex_unlock(&info->mm_lock);
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0889d50c3288..6506117c134b 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
ofb->video_mem_size = size;
+ mutex_lock(&ofb->fb.mm_lock);
ofb->fb.fix.smem_start = ofb->video_mem_phys;
ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
+ mutex_unlock(&ofb->fb.mm_lock);
ofb->fb.screen_base = ofb->video_mem;
return 0;
}
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 43680e545427..bb63c07e13de 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -211,23 +211,21 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/**
* s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
+ * @id: window id.
* @sfb: The hardware state.
* @pixclock: The pixel clock wanted, in picoseconds.
*
* Given the specified pixel clock, work out the necessary divider to get
* close to the output frequency.
*/
-static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
+static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk)
{
+ struct s3c_fb_pd_win *win = sfb->pdata->win[id];
unsigned long clk = clk_get_rate(sfb->bus_clk);
- unsigned long long tmp;
unsigned int result;
- tmp = (unsigned long long)clk;
- tmp *= pixclk;
-
- do_div(tmp, 1000000000UL);
- result = (unsigned int)tmp / 1000;
+ pixclk *= win->win_mode.refresh;
+ result = clk / pixclk;
dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
pixclk, clk, result, clk / result);
@@ -267,6 +265,7 @@ static int s3c_fb_set_par(struct fb_info *info)
struct s3c_fb *sfb = win->parent;
void __iomem *regs = sfb->regs;
int win_no = win->index;
+ u32 osdc_data = 0;
u32 data;
u32 pagewidth;
int clkdiv;
@@ -302,7 +301,7 @@ static int s3c_fb_set_par(struct fb_info *info)
/* use window 0 as the basis for the lcd output timings */
if (win_no == 0) {
- clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
+ clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock);
data = sfb->pdata->vidcon0;
data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
@@ -359,8 +358,6 @@ static int s3c_fb_set_par(struct fb_info *info)
data = var->xres * var->yres;
- u32 osdc_data = 0;
-
osdc_data = VIDISD14C_ALPHA1_R(0xf) |
VIDISD14C_ALPHA1_G(0xf) |
VIDISD14C_ALPHA1_B(0xf);
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 653bdfee3057..9f6d6e61f0cc 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno,
return 0;
}
-static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
- unsigned long stride)
-{
- memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, "sh7760-lcdc");
-
- fix->smem_start = (unsigned long)info->screen_base;
- fix->smem_len = info->screen_size;
-
- fix->line_length = stride;
-}
-
static int sh7760fb_get_color_info(struct device *dev,
u16 lddfr, int *bpp, int *gray)
{
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info)
iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
- encode_fix(&info->fix, info, stride);
+ info->fix.line_length = stride;
+
sh7760fb_check_var(&info->var, info);
sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
info->screen_base = fbmem;
info->screen_size = vram;
+ info->fix.smem_start = (unsigned long)info->screen_base;
+ info->fix.smem_len = info->screen_size;
return 0;
}
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
+ strcpy(info->fix.id, "sh7760-lcdc");
+
/* set the DON2 bit now, before cmap allocation, as it will randomize
* palette memory.
*/
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index f10d2fbeda06..8f24564f77b0 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/atomic.h>
@@ -30,9 +31,10 @@ struct sh_mobile_lcdc_chan {
unsigned long enabled; /* ME and SE in LDCNT2R */
struct sh_mobile_lcdc_chan_cfg cfg;
u32 pseudo_palette[PALETTE_NR];
- struct fb_info info;
+ struct fb_info *info;
dma_addr_t dma_handle;
struct fb_deferred_io defio;
+ struct scatterlist *sglist;
unsigned long frame_end;
wait_queue_head_t frame_end_wait;
};
@@ -206,16 +208,38 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {}
static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {}
#endif
+static int sh_mobile_lcdc_sginit(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct sh_mobile_lcdc_chan *ch = info->par;
+ unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT;
+ struct page *page;
+ int nr_pages = 0;
+
+ sg_init_table(ch->sglist, nr_pages_max);
+
+ list_for_each_entry(page, pagelist, lru)
+ sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
+
+ return nr_pages;
+}
+
static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct sh_mobile_lcdc_chan *ch = info->par;
+ unsigned int nr_pages;
/* enable clocks before accessing hardware */
sh_mobile_lcdc_clk_on(ch->lcdc);
+ nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
+ dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+
/* trigger panel update */
lcdc_write_chan(ch, LDSM2R, 1);
+
+ dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
}
static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
@@ -418,22 +442,22 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
/* set bpp format in PKF[4:0] */
tmp = lcdc_read_chan(ch, LDDFR);
tmp &= ~(0x0001001f);
- tmp |= (priv->ch[k].info.var.bits_per_pixel == 16) ? 3 : 0;
+ tmp |= (ch->info->var.bits_per_pixel == 16) ? 3 : 0;
lcdc_write_chan(ch, LDDFR, tmp);
/* point out our frame buffer */
- lcdc_write_chan(ch, LDSA1R, ch->info.fix.smem_start);
+ lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start);
/* set line size */
- lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length);
+ lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length);
/* setup deferred io if SYS bus */
tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
if (ch->ldmt1r_value & (1 << 12) && tmp) {
ch->defio.deferred_io = sh_mobile_lcdc_deferred_io;
ch->defio.delay = msecs_to_jiffies(tmp);
- ch->info.fbdefio = &ch->defio;
- fb_deferred_io_init(&ch->info);
+ ch->info->fbdefio = &ch->defio;
+ fb_deferred_io_init(ch->info);
/* one-shot mode */
lcdc_write_chan(ch, LDSM1R, 1);
@@ -479,12 +503,12 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
* flush frame, and wait for frame end interrupt
* clean up deferred io and enable clock
*/
- if (ch->info.fbdefio) {
+ if (ch->info->fbdefio) {
ch->frame_end = 0;
- schedule_delayed_work(&ch->info.deferred_work, 0);
+ schedule_delayed_work(&ch->info->deferred_work, 0);
wait_event(ch->frame_end_wait, ch->frame_end);
- fb_deferred_io_cleanup(&ch->info);
- ch->info.fbdefio = NULL;
+ fb_deferred_io_cleanup(ch->info);
+ ch->info->fbdefio = NULL;
sh_mobile_lcdc_clk_on(priv);
}
@@ -793,9 +817,16 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1);
for (i = 0; i < j; i++) {
- info = &priv->ch[i].info;
cfg = &priv->ch[i].cfg;
+ priv->ch[i].info = framebuffer_alloc(0, &pdev->dev);
+ if (!priv->ch[i].info) {
+ dev_err(&pdev->dev, "unable to allocate fb_info\n");
+ error = -ENOMEM;
+ break;
+ }
+
+ info = priv->ch[i].info;
info->fbops = &sh_mobile_lcdc_ops;
info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres;
info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres;
@@ -846,21 +877,31 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
}
for (i = 0; i < j; i++) {
- error = register_framebuffer(&priv->ch[i].info);
+ struct sh_mobile_lcdc_chan *ch = priv->ch + i;
+
+ info = ch->info;
+
+ if (info->fbdefio) {
+ ch->sglist = vmalloc(sizeof(struct scatterlist) *
+ info->fix.smem_len >> PAGE_SHIFT);
+ if (!ch->sglist) {
+ dev_err(&pdev->dev, "cannot allocate sglist\n");
+ goto err1;
+ }
+ }
+
+ error = register_framebuffer(info);
if (error < 0)
goto err1;
- }
- for (i = 0; i < j; i++) {
- info = &priv->ch[i].info;
dev_info(info->dev,
"registered %s/%s as %dx%d %dbpp.\n",
pdev->name,
- (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ?
+ (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
"mainlcd" : "sublcd",
- (int) priv->ch[i].cfg.lcd_cfg.xres,
- (int) priv->ch[i].cfg.lcd_cfg.yres,
- priv->ch[i].cfg.bpp);
+ (int) ch->cfg.lcd_cfg.xres,
+ (int) ch->cfg.lcd_cfg.yres,
+ ch->cfg.bpp);
/* deferred io mode: disable clock to save power */
if (info->fbdefio)
@@ -881,20 +922,24 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
int i;
for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
- if (priv->ch[i].info.dev)
- unregister_framebuffer(&priv->ch[i].info);
+ if (priv->ch[i].info->dev)
+ unregister_framebuffer(priv->ch[i].info);
sh_mobile_lcdc_stop(priv);
for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
- info = &priv->ch[i].info;
+ info = priv->ch[i].info;
- if (!info->device)
+ if (!info || !info->device)
continue;
+ if (priv->ch[i].sglist)
+ vfree(priv->ch[i].sglist);
+
dma_free_coherent(&pdev->dev, info->fix.smem_len,
info->screen_base, priv->ch[i].dma_handle);
fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
}
#ifdef CONFIG_HAVE_CLK
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7072d19080d5..4a067f0d0ceb 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
strcpy(fix->id, ivideo->myid);
+ mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
fix->smem_len = ivideo->sisfb_mem;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
@@ -6365,7 +6367,6 @@ error_3: vfree(ivideo->bios_abase);
sis_fb_info->fix = ivideo->sisfb_fix;
sis_fb_info->screen_base = ivideo->video_vbase + ivideo->video_offset;
sis_fb_info->fbops = &sisfb_ops;
- sisfb_get_fix(&sis_fb_info->fix, -1, sis_fb_info);
sis_fb_info->pseudo_palette = ivideo->pseudo_palette;
fb_alloc_cmap(&sis_fb_info->cmap, 256 , 0);
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index eb5d73a06702..924d79462780 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
#define SM501_MEMF_ACCEL (8)
static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
- unsigned int why, size_t size)
+ unsigned int why, size_t size, u32 smem_len)
{
struct sm501fb_par *par;
struct fb_info *fbi;
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
if (ptr > 0)
ptr &= ~(PAGE_SIZE - 1);
- if (fbi && ptr < fbi->fix.smem_len)
+ if (fbi && ptr < smem_len)
return -ENOMEM;
break;
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
case SM501_MEMF_ACCEL:
fbi = inf->fb[HEAD_CRT];
- ptr = fbi ? fbi->fix.smem_len : 0;
+ ptr = fbi ? smem_len : 0;
fbi = inf->fb[HEAD_PANEL];
if (fbi) {
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
unsigned int mem_type;
unsigned int clock_type;
unsigned int head_addr;
+ unsigned int smem_len;
dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
__func__, var->xres, var->yres, var->bits_per_pixel,
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info,
/* allocate fb memory within 501 */
info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
- info->fix.smem_len = info->fix.line_length * var->yres_virtual;
+ smem_len = info->fix.line_length * var->yres_virtual;
dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
info->fix.line_length);
- if (sm501_alloc_mem(fbi, &par->screen, mem_type,
- info->fix.smem_len)) {
+ if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
dev_err(fbi->dev, "no memory available\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_base = fbi->fbmem + par->screen.sm_addr;
info->screen_size = info->fix.smem_len;
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info)
if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
/* the head is displaying panel data... */
- sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0);
+ sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
+ info->fix.smem_len);
goto out_update;
}
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
par->cursor_regs = info->regs + reg_base;
- ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024);
+ ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
+ fbi->fix.smem_len);
if (ret < 0)
return ret;
@@ -1535,9 +1540,6 @@ static int sm501fb_init_fb(struct fb_info *fb,
if (ret)
dev_err(info->dev, "check_var() failed on initial setup?\n");
- /* ensure we've activated our new configuration */
- (fb->fbops->fb_set_par)(fb);
-
return 0;
}
@@ -1619,6 +1621,8 @@ static int __devinit sm501fb_start_one(struct sm501fb_info *info,
if (!fbi)
return 0;
+ mutex_init(&info->fb[head]->mm_lock);
+
ret = sm501fb_init_fb(info->fb[head], head, drvname);
if (ret) {
dev_err(info->dev, "cannot initialise fb %s\n", drvname);
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index eec9dcb7f599..6120f0c526fe 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1115,10 +1115,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
if the device name contains the string "DX" and tell the
user how to reconfigure the card. */
if (strstr(sti->outptr.dev_name, "DX")) {
- printk(KERN_WARNING "WARNING: stifb framebuffer driver does not "
- "support '%s' in double-buffer mode.\n"
- KERN_WARNING "WARNING: Please disable the double-buffer mode "
- "in IPL menu (the PARISC-BIOS).\n",
+ printk(KERN_WARNING
+"WARNING: stifb framebuffer driver does not support '%s' in double-buffer mode.\n"
+"WARNING: Please disable the double-buffer mode in IPL menu (the PARISC-BIOS).\n",
sti->outptr.dev_name);
goto out_err0;
}
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index d0674f1e3f10..2376f688ec8b 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info)
info->fix.ywrapstep = 0;
info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
+ mutex_lock(&info->mm_lock);
if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
par->extmem_active = 1;
info->fix.smem_len = par->mach->mem->size+1;
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info)
par->extmem_active = 0;
info->fix.smem_len = MEM_INT_SIZE+1;
}
+ mutex_unlock(&info->mm_lock);
w100fb_activate_var(par);
}
@@ -746,8 +748,6 @@ int __init w100fb_probe(struct platform_device *pdev)
goto out;
}
- w100fb_set_par(info);
-
if (register_framebuffer(info) < 0) {
err = -EINVAL;
goto out;
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
index f6542211db48..a9efb1625321 100644
--- a/drivers/vlynq/Kconfig
+++ b/drivers/vlynq/Kconfig
@@ -13,7 +13,7 @@ config VLYNQ
config VLYNQ_DEBUG
bool "VLYNQ bus debug"
- depends on VLYNQ && KERNEL_DEBUG
+ depends on VLYNQ && DEBUG_KERNEL
help
Turn on VLYNQ bus debugging.
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index 7335433b067b..f05d2a368367 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -76,7 +76,7 @@ struct vlynq_regs {
u32 int_device[8];
};
-#ifdef VLYNQ_DEBUG
+#ifdef CONFIG_VLYNQ_DEBUG
static void vlynq_dump_regs(struct vlynq_device *dev)
{
int i;
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 1f09d4e4144c..59f708efe25f 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -68,6 +68,34 @@ int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count)
return w1_ds2760_io(dev, buf, addr, count, 1);
}
+static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd)
+{
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->mutex);
+
+ if (w1_reset_select_slave(sl) == 0) {
+ w1_write_8(sl->master, cmd);
+ w1_write_8(sl->master, addr);
+ }
+
+ mutex_unlock(&sl->master->mutex);
+ return 0;
+}
+
+int w1_ds2760_store_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA);
+}
+
+int w1_ds2760_recall_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
+}
+
static ssize_t w1_ds2760_read_bin(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -200,6 +228,8 @@ static void __exit w1_ds2760_exit(void)
EXPORT_SYMBOL(w1_ds2760_read);
EXPORT_SYMBOL(w1_ds2760_write);
+EXPORT_SYMBOL(w1_ds2760_store_eeprom);
+EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
module_init(w1_ds2760_init);
module_exit(w1_ds2760_exit);
diff --git a/drivers/w1/slaves/w1_ds2760.h b/drivers/w1/slaves/w1_ds2760.h
index f1302429cb02..58e774141568 100644
--- a/drivers/w1/slaves/w1_ds2760.h
+++ b/drivers/w1/slaves/w1_ds2760.h
@@ -25,6 +25,10 @@
#define DS2760_PROTECTION_REG 0x00
#define DS2760_STATUS_REG 0x01
+ #define DS2760_STATUS_IE (1 << 2)
+ #define DS2760_STATUS_SWEN (1 << 3)
+ #define DS2760_STATUS_RNAOP (1 << 4)
+ #define DS2760_STATUS_PMOD (1 << 5)
#define DS2760_EEPROM_REG 0x07
#define DS2760_SPECIAL_FEATURE_REG 0x08
#define DS2760_VOLTAGE_MSB 0x0c
@@ -38,6 +42,7 @@
#define DS2760_EEPROM_BLOCK0 0x20
#define DS2760_ACTIVE_FULL 0x20
#define DS2760_EEPROM_BLOCK1 0x30
+#define DS2760_STATUS_WRITE_REG 0x31
#define DS2760_RATED_CAPACITY 0x32
#define DS2760_CURRENT_OFFSET_BIAS 0x33
#define DS2760_ACTIVE_EMPTY 0x3b
@@ -46,5 +51,7 @@ extern int w1_ds2760_read(struct device *dev, char *buf, int addr,
size_t count);
extern int w1_ds2760_write(struct device *dev, char *buf, int addr,
size_t count);
+extern int w1_ds2760_store_eeprom(struct device *dev, int addr);
+extern int w1_ds2760_recall_eeprom(struct device *dev, int addr);
#endif /* !__w1_ds2760_h__ */
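
For illustration (editorial addition, not from this series), a sketch of how a caller such as a battery-monitor driver might use the new w1_ds2760_store_eeprom()/w1_ds2760_recall_eeprom() helpers together with the status bits added above. The function name and the exact register sequence are assumptions; only the exported helpers and register definitions come from the patch.

#include <linux/device.h>
#include "w1_ds2760.h"

/* hypothetical caller: set the sleep-mode bit in the EEPROM status shadow */
static int example_enable_sleep_mode(struct device *w1_dev)
{
	char status;

	/* refresh the SRAM shadow of EEPROM block 1 */
	w1_ds2760_recall_eeprom(w1_dev, DS2760_EEPROM_BLOCK1);

	if (w1_ds2760_read(w1_dev, &status, DS2760_STATUS_WRITE_REG, 1) != 1)
		return -EIO;

	status |= DS2760_STATUS_PMOD;	/* example bit: sleep/power mode */
	w1_ds2760_write(w1_dev, &status, DS2760_STATUS_WRITE_REG, 1);

	/* commit the shadow back to EEPROM block 1 */
	return w1_ds2760_store_eeprom(w1_dev, DS2760_EEPROM_BLOCK1);
}
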
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b166f2852a64..b1ccc04f3c9a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -240,6 +240,32 @@ config ORION_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called orion_wdt.
+config COH901327_WATCHDOG
+ bool "ST-Ericsson COH 901 327 watchdog"
+ depends on ARCH_U300
+ default y if MACH_U300
+ help
+ Say Y here to include Watchdog timer support for the
+ watchdog embedded into the ST-Ericsson U300 series platforms.
+ This watchdog is used to reset the system and thus cannot be
+ compiled as a module.
+
+config TWL4030_WATCHDOG
+ tristate "TWL4030 Watchdog"
+ depends on TWL4030_CORE
+ help
+ Support for TI TWL4030 watchdog. Say 'Y' here to enable the
+ watchdog timer support for TWL4030 chips.
+
+config STMP3XXX_WATCHDOG
+ tristate "Freescale STMP3XXX watchdog"
+ depends on ARCH_STMP3XXX
+ help
+ Say Y here to include support for the watchdog timer
+ for the Sigmatel STMP37XX/378X SoC.
+ To compile this driver as a module, choose M here: the
+ module will be called stmp3xxx_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -703,6 +729,12 @@ config SBC_EPX_C3_WATCHDOG
# MIPS Architecture
+config BCM47XX_WDT
+ tristate "Broadcom BCM47xx Watchdog Timer"
+ depends on BCM47XX
+ help
+ Hardware driver for the Broadcom BCM47xx Watchdog Timer.
+
config RC32434_WDT
tristate "IDT RC32434 SoC Watchdog Timer"
depends on MIKROTIK_RB532
@@ -729,6 +761,15 @@ config WDT_MTX1
Hardware driver for the MTX-1 boards. This is a watchdog timer that
will reboot the machine after a 100 seconds timer expired.
+config PNX833X_WDT
+ tristate "PNX833x Hardware Watchdog"
+ depends on SOC_PNX8335
+ help
+ Hardware driver for the PNX833x's watchdog. This is a
+ watchdog timer that will reboot the machine after a programmable
+ timer has expired and no process has written to /dev/watchdog during
+ that time.
+
config WDT_RM9K_GPI
tristate "RM9000/GPI hardware watchdog"
depends on CPU_RM9000
@@ -966,24 +1007,16 @@ config WDTPCI
---help---
If you have a PCI-WDT500/501 watchdog board, say Y here, otherwise N.
- To compile this driver as a module, choose M here: the
- module will be called wdt_pci.
-
-config WDT_501_PCI
- bool "PCI-WDT501 features"
- depends on WDTPCI
- help
- Saying Y here and creating a character special file /dev/temperature
- with major number 10 and minor number 131 ("man mknod") will give
- you a thermometer inside your computer: reading from
- /dev/temperature yields one byte, the temperature in degrees
- Fahrenheit. This works only if you have a PCI-WDT501 watchdog board
- installed.
+ If you have a PCI-WDT501 watchdog board then you can enable the
+ temperature sensor by setting the type parameter to 501.
If you want to enable the Fan Tachometer on the PCI-WDT501, then you
can do this via the tachometer parameter. Only do this if you have a
fan tachometer actually set up.
+ To compile this driver as a module, choose M here: the
+ module will be called wdt_pci.
+
#
# USB-based Watchdog Cards
#
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c3afa14d5be1..3d774294a2b7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o
obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o
obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o
+obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o
obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
obj-$(CONFIG_977_WATCHDOG) += wdt977.o
obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
@@ -41,6 +42,8 @@ obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
+obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
+obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -98,9 +101,11 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
# M68KNOMMU Architecture
# MIPS Architecture
+obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
+obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
new file mode 100644
index 000000000000..751c003864ad
--- /dev/null
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -0,0 +1,286 @@
+/*
+ * Watchdog driver for Broadcom BCM47XX
+ *
+ * Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs>
+ * Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/reboot.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/ssb/ssb_embedded.h>
+#include <asm/mach-bcm47xx/bcm47xx.h>
+
+#define DRV_NAME "bcm47xx_wdt"
+
+#define WDT_DEFAULT_TIME 30 /* seconds */
+#define WDT_MAX_TIME 255 /* seconds */
+
+static int wdt_time = WDT_DEFAULT_TIME;
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(wdt_time, int, 0);
+MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
+ __MODULE_STRING(WDT_DEFAULT_TIME) ")");
+
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+#endif
+
+static unsigned long bcm47xx_wdt_busy;
+static char expect_release;
+static struct timer_list wdt_timer;
+static atomic_t ticks;
+
+static inline void bcm47xx_wdt_hw_start(void)
+{
+ /* this is 2.5s on a 100 MHz clock and 2s on 133 MHz */
+ ssb_watchdog_timer_set(&ssb_bcm47xx, 0xfffffff);
+}
+
+static inline int bcm47xx_wdt_hw_stop(void)
+{
+ return ssb_watchdog_timer_set(&ssb_bcm47xx, 0);
+}
+
+static void bcm47xx_timer_tick(unsigned long unused)
+{
+ if (!atomic_dec_and_test(&ticks)) {
+ bcm47xx_wdt_hw_start();
+ mod_timer(&wdt_timer, jiffies + HZ);
+ } else {
+ printk(KERN_CRIT DRV_NAME "Watchdog will fire soon!!!\n");
+ }
+}
+
+static inline void bcm47xx_wdt_pet(void)
+{
+ atomic_set(&ticks, wdt_time);
+}
+
+static void bcm47xx_wdt_start(void)
+{
+ bcm47xx_wdt_pet();
+ bcm47xx_timer_tick(0);
+}
+
+static void bcm47xx_wdt_pause(void)
+{
+ del_timer_sync(&wdt_timer);
+ bcm47xx_wdt_hw_stop();
+}
+
+static void bcm47xx_wdt_stop(void)
+{
+ bcm47xx_wdt_pause();
+}
+
+static int bcm47xx_wdt_settimeout(int new_time)
+{
+ if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
+ return -EINVAL;
+
+ wdt_time = new_time;
+ return 0;
+}
+
+static int bcm47xx_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &bcm47xx_wdt_busy))
+ return -EBUSY;
+
+ bcm47xx_wdt_start();
+ return nonseekable_open(inode, file);
+}
+
+static int bcm47xx_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_release == 42) {
+ bcm47xx_wdt_stop();
+ } else {
+ printk(KERN_CRIT DRV_NAME
+ ": Unexpected close, not stopping watchdog!\n");
+ bcm47xx_wdt_start();
+ }
+
+ clear_bit(0, &bcm47xx_wdt_busy);
+ expect_release = 0;
+ return 0;
+}
+
+static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ expect_release = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_release = 42;
+ }
+ }
+ bcm47xx_wdt_pet();
+ }
+ return len;
+}
+
+static struct watchdog_info bcm47xx_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static long bcm47xx_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_value, retval = -EINVAL;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &bcm47xx_wdt_info,
+ sizeof(bcm47xx_wdt_info)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_value, p))
+ return -EFAULT;
+
+ if (new_value & WDIOS_DISABLECARD) {
+ bcm47xx_wdt_stop();
+ retval = 0;
+ }
+
+ if (new_value & WDIOS_ENABLECARD) {
+ bcm47xx_wdt_start();
+ retval = 0;
+ }
+
+ return retval;
+
+ case WDIOC_KEEPALIVE:
+ bcm47xx_wdt_pet();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_value, p))
+ return -EFAULT;
+
+ if (bcm47xx_wdt_settimeout(new_value))
+ return -EINVAL;
+
+ bcm47xx_wdt_pet();
+
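+ /* fall through to report the timeout now in effect */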
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt_time, p);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ bcm47xx_wdt_stop();
+ return NOTIFY_DONE;
+}
+
+static const struct file_operations bcm47xx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = bcm47xx_wdt_ioctl,
+ .open = bcm47xx_wdt_open,
+ .release = bcm47xx_wdt_release,
+ .write = bcm47xx_wdt_write,
+};
+
+static struct miscdevice bcm47xx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &bcm47xx_wdt_fops,
+};
+
+static struct notifier_block bcm47xx_wdt_notifier = {
+ .notifier_call = bcm47xx_wdt_notify_sys,
+};
+
+static int __init bcm47xx_wdt_init(void)
+{
+ int ret;
+
+ if (bcm47xx_wdt_hw_stop() < 0)
+ return -ENODEV;
+
+ setup_timer(&wdt_timer, bcm47xx_timer_tick, 0L);
+
+ if (bcm47xx_wdt_settimeout(wdt_time)) {
+ bcm47xx_wdt_settimeout(WDT_DEFAULT_TIME);
+ printk(KERN_INFO DRV_NAME ": "
+ "wdt_time value must be 0 < wdt_time < %d, using %d\n",
+ (WDT_MAX_TIME + 1), wdt_time);
+ }
+
+ ret = register_reboot_notifier(&bcm47xx_wdt_notifier);
+ if (ret)
+ return ret;
+
+ ret = misc_register(&bcm47xx_wdt_miscdev);
+ if (ret) {
+ unregister_reboot_notifier(&bcm47xx_wdt_notifier);
+ return ret;
+ }
+
+ printk(KERN_INFO "BCM47xx Watchdog Timer enabled (%d seconds%s)\n",
+ wdt_time, nowayout ? ", nowayout" : "");
+ return 0;
+}
+
+static void __exit bcm47xx_wdt_exit(void)
+{
+ if (!nowayout)
+ bcm47xx_wdt_stop();
+
+ misc_deregister(&bcm47xx_wdt_miscdev);
+
+ unregister_reboot_notifier(&bcm47xx_wdt_notifier);
+}
+
+module_init(bcm47xx_wdt_init);
+module_exit(bcm47xx_wdt_exit);
+
+MODULE_AUTHOR("Aleksandar Radovanovic");
+MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
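
For reference (editorial addition, not part of the patch), a minimal user-space sketch of the standard /dev/watchdog interface this new driver implements, exercising the timeout and keepalive ioctls and the magic-close handshake:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;	/* seconds; this driver caps it at 255 */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* pet the dog */

	/* magic close: without the 'V' (or with nowayout set) the
	 * driver keeps the watchdog running after close() */
	write(fd, "V", 1);
	close(fd);
	return 0;
}
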
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
new file mode 100644
index 000000000000..fecb307d28e9
--- /dev/null
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -0,0 +1,537 @@
+/*
+ * coh901327_wdt.c
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "WDOG COH 901 327"
+
+/*
+ * COH 901 327 register definitions
+ */
+
+/* WDOG_FEED Register 32bit (-/W) */
+#define U300_WDOG_FR 0x00
+#define U300_WDOG_FR_FEED_RESTART_TIMER 0xFEEDU
+/* WDOG_TIMEOUT Register 32bit (R/W) */
+#define U300_WDOG_TR 0x04
+#define U300_WDOG_TR_TIMEOUT_MASK 0x7FFFU
+/* WDOG_DISABLE1 Register 32bit (-/W) */
+#define U300_WDOG_D1R 0x08
+#define U300_WDOG_D1R_DISABLE1_DISABLE_TIMER 0x2BADU
+/* WDOG_DISABLE2 Register 32bit (R/W) */
+#define U300_WDOG_D2R 0x0C
+#define U300_WDOG_D2R_DISABLE2_DISABLE_TIMER 0xCAFEU
+#define U300_WDOG_D2R_DISABLE_STATUS_DISABLED 0xDABEU
+#define U300_WDOG_D2R_DISABLE_STATUS_ENABLED 0x0000U
+/* WDOG_STATUS Register 32bit (R/W) */
+#define U300_WDOG_SR 0x10
+#define U300_WDOG_SR_STATUS_TIMED_OUT 0xCFE8U
+#define U300_WDOG_SR_STATUS_NORMAL 0x0000U
+#define U300_WDOG_SR_RESET_STATUS_RESET 0xE8B4U
+/* WDOG_COUNT Register 32bit (R/-) */
+#define U300_WDOG_CR 0x14
+#define U300_WDOG_CR_VALID_IND 0x8000U
+#define U300_WDOG_CR_VALID_STABLE 0x0000U
+#define U300_WDOG_CR_COUNT_VALUE_MASK 0x7FFFU
+/* WDOG_JTAGOVR Register 32bit (R/W) */
+#define U300_WDOG_JOR 0x18
+#define U300_WDOG_JOR_JTAG_MODE_IND 0x0002U
+#define U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE 0x0001U
+/* WDOG_RESTART Register 32bit (-/W) */
+#define U300_WDOG_RR 0x1C
+#define U300_WDOG_RR_RESTART_VALUE_RESUME 0xACEDU
+/* WDOG_IRQ_EVENT Register 32bit (R/W) */
+#define U300_WDOG_IER 0x20
+#define U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND 0x0001U
+#define U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE 0x0001U
+/* WDOG_IRQ_MASK Register 32bit (R/W) */
+#define U300_WDOG_IMR 0x24
+#define U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE 0x0001U
+/* WDOG_IRQ_FORCE Register 32bit (R/W) */
+#define U300_WDOG_IFR 0x28
+#define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U
+
+/* Default timeout in seconds = 1 minute */
+static int margin = 60;
+static resource_size_t phybase;
+static resource_size_t physize;
+static int irq;
+static void __iomem *virtbase;
+static unsigned long coh901327_users;
+static unsigned long boot_status;
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+static struct device *parent;
+
+/*
+ * The watchdog block is of course always clocked, the
+ * clk_enable()/clk_disable() calls are mainly for performing reference
+ * counting higher up in the clock hierarchy.
+ */
+static struct clk *clk;
+
+/*
+ * Enabling and disabling functions.
+ */
+static void coh901327_enable(u16 timeout)
+{
+ u16 val;
+
+ clk_enable(clk);
+ /* Restart timer if it is disabled */
+ val = readw(virtbase + U300_WDOG_D2R);
+ if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
+ writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
+ virtbase + U300_WDOG_RR);
+ /* Acknowledge any pending interrupt so it doesn't just fire off */
+ writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
+ virtbase + U300_WDOG_IER);
+ /* Enable the watchdog interrupt */
+ writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR);
+ /* Activate the watchdog timer */
+ writew(timeout, virtbase + U300_WDOG_TR);
+ /* Start the watchdog timer */
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR);
+ /*
+ * Extra read so that this change propagates to the watchdog.
+ */
+ (void) readw(virtbase + U300_WDOG_CR);
+ val = readw(virtbase + U300_WDOG_D2R);
+ clk_disable(clk);
+ if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
+ dev_err(parent,
+ "%s(): watchdog not enabled! D2R value %04x\n",
+ __func__, val);
+}
+
+static void coh901327_disable(void)
+{
+ u16 val;
+
+ clk_enable(clk);
+ /* Disable the watchdog interrupt if it is active */
+ writew(0x0000U, virtbase + U300_WDOG_IMR);
+ /* If the watchdog is currently enabled, attempt to disable it */
+ val = readw(virtbase + U300_WDOG_D2R);
+ if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) {
+ writew(U300_WDOG_D1R_DISABLE1_DISABLE_TIMER,
+ virtbase + U300_WDOG_D1R);
+ writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
+ virtbase + U300_WDOG_D2R);
+ /* Write this twice (else problems occur) */
+ writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
+ virtbase + U300_WDOG_D2R);
+ }
+ val = readw(virtbase + U300_WDOG_D2R);
+ clk_disable(clk);
+ if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
+ dev_err(parent,
+ "%s(): watchdog not disabled! D2R value %04x\n",
+ __func__, val);
+}
+
+static void coh901327_start(void)
+{
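+ /* The timeout register counts in 10 ms units, so convert seconds to centiseconds */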
+ coh901327_enable(margin * 100);
+}
+
+static void coh901327_keepalive(void)
+{
+ clk_enable(clk);
+ /* Feed the watchdog */
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER,
+ virtbase + U300_WDOG_FR);
+ clk_disable(clk);
+}
+
+static int coh901327_settimeout(int time)
+{
+ /*
+ * Max margin is 327 since the 10ms
+ * timeout register is max
+ * 0x7FFF = 327670ms ~= 327s.
+ */
+ if (time <= 0 || time > 327)
+ return -EINVAL;
+
+ margin = time;
+ clk_enable(clk);
+ /* Set new timeout value */
+ writew(margin * 100, virtbase + U300_WDOG_TR);
+ /* Feed the dog */
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER,
+ virtbase + U300_WDOG_FR);
+ clk_disable(clk);
+ return 0;
+}
+
+/*
+ * This interrupt occurs 10 ms before the watchdog WILL bark.
+ */
+static irqreturn_t coh901327_interrupt(int irq, void *data)
+{
+ u16 val;
+
+ /*
+ * Ack IRQ? If this occurs we're FUBAR anyway, so
+ * just acknowledge, disable the interrupt and await the imminent end.
+ * If you at some point need a host of callbacks to be called
+ * when the system is about to watchdog-reset, add them here!
+ *
+ * NOTE: on future versions of this IP-block, it will be possible
+ * to prevent a watchdog reset by feeding the watchdog at this
+ * point.
+ */
+ clk_enable(clk);
+ val = readw(virtbase + U300_WDOG_IER);
+ if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND)
+ writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
+ virtbase + U300_WDOG_IER);
+ writew(0x0000U, virtbase + U300_WDOG_IMR);
+ clk_disable(clk);
+ dev_crit(parent, "watchdog is barking!\n");
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allow only one user (daemon) to open the watchdog
+ */
+static int coh901327_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(1, &coh901327_users))
+ return -EBUSY;
+ coh901327_start();
+ return nonseekable_open(inode, file);
+}
+
+static int coh901327_release(struct inode *inode, struct file *file)
+{
+ clear_bit(1, &coh901327_users);
+ coh901327_disable();
+ return 0;
+}
+
+static ssize_t coh901327_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (len)
+ coh901327_keepalive();
+ return len;
+}
+
+static long coh901327_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -ENOTTY;
+ u16 val;
+ int time;
+ int new_options;
+ union {
+ struct watchdog_info __user *ident;
+ int __user *i;
+ } uarg;
+ static struct watchdog_info ident = {
+ .options = WDIOF_CARDRESET |
+ WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING,
+ .identity = "COH 901 327 Watchdog",
+ .firmware_version = 1,
+ };
+ uarg.i = (int __user *)arg;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user(uarg.ident, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = put_user(0, uarg.i);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(boot_status, uarg.i);
+ break;
+
+ case WDIOC_SETOPTIONS:
+ ret = get_user(new_options, uarg.i);
+ if (ret)
+ break;
+ if (new_options & WDIOS_DISABLECARD)
+ coh901327_disable();
+ if (new_options & WDIOS_ENABLECARD)
+ coh901327_start();
+ ret = 0;
+ break;
+
+ case WDIOC_KEEPALIVE:
+ coh901327_keepalive();
+ ret = 0;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ ret = get_user(time, uarg.i);
+ if (ret)
+ break;
+
+ ret = coh901327_settimeout(time);
+ if (ret)
+ break;
+ /* Then fall through to return set value */
+
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(margin, uarg.i);
+ break;
+
+ case WDIOC_GETTIMELEFT:
+ clk_enable(clk);
+ /* Read repeatedly until the value is stable! */
+ val = readw(virtbase + U300_WDOG_CR);
+ while (val & U300_WDOG_CR_VALID_IND)
+ val = readw(virtbase + U300_WDOG_CR);
+ val &= U300_WDOG_CR_COUNT_VALUE_MASK;
+ clk_disable(clk);
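+ /* The count is in 10 ms units; convert to seconds */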
+ if (val != 0)
+ val /= 100;
+ ret = put_user(val, uarg.i);
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations coh901327_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = coh901327_write,
+ .unlocked_ioctl = coh901327_ioctl,
+ .open = coh901327_open,
+ .release = coh901327_release,
+};
+
+static struct miscdevice coh901327_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &coh901327_fops,
+};
+
+static int __exit coh901327_remove(struct platform_device *pdev)
+{
+ misc_deregister(&coh901327_miscdev);
+ coh901327_disable();
+ free_irq(irq, pdev);
+ clk_put(clk);
+ iounmap(virtbase);
+ release_mem_region(phybase, physize);
+ return 0;
+}
+
+
+static int __init coh901327_probe(struct platform_device *pdev)
+{
+ int ret;
+ u16 val;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ parent = &pdev->dev;
+ physize = resource_size(res);
+ phybase = res->start;
+
+ if (request_mem_region(phybase, physize, DRV_NAME) == NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ virtbase = ioremap(phybase, physize);
+ if (!virtbase) {
+ ret = -ENOMEM;
+ goto out_no_remap;
+ }
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev, "could not get clock\n");
+ goto out_no_clk;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable clock\n");
+ goto out_no_clk_enable;
+ }
+
+ val = readw(virtbase + U300_WDOG_SR);
+ switch (val) {
+ case U300_WDOG_SR_STATUS_TIMED_OUT:
+ dev_info(&pdev->dev,
+ "watchdog timed out since last chip reset!\n");
+ boot_status = WDIOF_CARDRESET;
+ /* Status will be cleared below */
+ break;
+ case U300_WDOG_SR_STATUS_NORMAL:
+ dev_info(&pdev->dev,
+ "in normal status, no timeouts have occurred.\n");
+ break;
+ default:
+ dev_info(&pdev->dev,
+ "contains an illegal status code (%08x)\n", val);
+ break;
+ }
+
+ val = readw(virtbase + U300_WDOG_D2R);
+ switch (val) {
+ case U300_WDOG_D2R_DISABLE_STATUS_DISABLED:
+ dev_info(&pdev->dev, "currently disabled.\n");
+ break;
+ case U300_WDOG_D2R_DISABLE_STATUS_ENABLED:
+ dev_info(&pdev->dev,
+ "currently enabled! (disabling it now)\n");
+ coh901327_disable();
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "contains an illegal enable/disable code (%08x)\n",
+ val);
+ break;
+ }
+
+ /* Reset the watchdog */
+ writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);
+
+ irq = platform_get_irq(pdev, 0);
+ if (request_irq(irq, coh901327_interrupt, IRQF_DISABLED,
+ DRV_NAME " Bark", pdev)) {
+ ret = -EIO;
+ goto out_no_irq;
+ }
+
+ clk_disable(clk);
+
+ ret = misc_register(&coh901327_miscdev);
+ if (ret == 0)
+ dev_info(&pdev->dev,
+ "initialized. timer margin=%d sec\n", margin);
+ else
+ goto out_no_wdog;
+
+ return 0;
+
+out_no_wdog:
+ free_irq(irq, pdev);
+out_no_irq:
+ clk_disable(clk);
+out_no_clk_enable:
+ clk_put(clk);
+out_no_clk:
+ iounmap(virtbase);
+out_no_remap:
+ release_mem_region(phybase, physize);
+out:
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
+{
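+ /* Save the IRQ mask and watchdog enable state so resume can restore them */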
+ irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
+ wdogenablestore = readw(virtbase + U300_WDOG_D2R);
+ /* If watchdog is on, disable it here and now */
+ if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
+ coh901327_disable();
+ return 0;
+}
+
+static int coh901327_resume(struct platform_device *pdev)
+{
+ /* Restore the watchdog interrupt */
+ writew(irqmaskstore, virtbase + U300_WDOG_IMR);
+ if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) {
+ /* Restart the watchdog timer */
+ writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
+ virtbase + U300_WDOG_RR);
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER,
+ virtbase + U300_WDOG_FR);
+ }
+ return 0;
+}
+#else
+#define coh901327_suspend NULL
+#define coh901327_resume NULL
+#endif
+
+/*
+ * Mistreating the watchdog is the only way to perform a software reset of the
+ * system on EMP platforms. So we implement this and export a symbol for it.
+ */
+void coh901327_watchdog_reset(void)
+{
+ /* Enable even if on JTAG too */
+ writew(U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE,
+ virtbase + U300_WDOG_JOR);
+ /*
+ * Timeout = 5s, we have to wait for the watchdog reset to
+ * actually take place: the watchdog will be reloaded with the
+ * default value immediately, so we HAVE to reboot and get back
+ * into the kernel in 30s, or the device will reboot again!
+ * The boot loader will typically deactivate the watchdog, so the
+ * timeout must be long enough for the boot loader to reach the
+ * point where it deactivates the watchdog before the watchdog
+ * resets the system again.
+ *
+ * NOTE: on future versions of the watchdog, this restriction is
+ * gone: the watchdog will be reloaded with a default value (1 min)
+ * instead of last value, and you can conveniently set the watchdog
+ * timeout to 10ms (value = 1) without any problems.
+ */
+ coh901327_enable(500);
+ /* Return and await doom */
+}
+
+static struct platform_driver coh901327_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "coh901327_wdog",
+ },
+ .remove = __exit_p(coh901327_remove),
+ .suspend = coh901327_suspend,
+ .resume = coh901327_resume,
+};
+
+static int __init coh901327_init(void)
+{
+ return platform_driver_probe(&coh901327_driver, coh901327_probe);
+}
+module_init(coh901327_init);
+
+static void __exit coh901327_exit(void)
+{
+ platform_driver_unregister(&coh901327_driver);
+}
+module_exit(coh901327_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("COH 901 327 Watchdog");
+
+module_param(margin, int, 0);
+MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index c0b9169ba5d5..a6c5674c78e6 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -120,7 +120,8 @@ static int nowayout = WATCHDOG_NOWAYOUT;
static char expect_release;
static unsigned long hpwdt_is_open;
static unsigned int allow_kdump;
-static int hpwdt_nmi_sourcing;
+static unsigned int hpwdt_nmi_sourcing;
+static unsigned int priority; /* hpwdt at end of die_notify list */
static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
@@ -623,7 +624,7 @@ static struct miscdevice hpwdt_miscdev = {
static struct notifier_block die_notifier = {
.notifier_call = hpwdt_pretimeout,
- .priority = 0x7FFFFFFF,
+ .priority = 0,
};
/*
@@ -641,7 +642,8 @@ static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
hpwdt_nmi_sourcing = 1;
else
dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this "
- "functionality you must reboot with nmi_watchdog=0.\n");
+ "functionality you must reboot with nmi_watchdog=0 "
+ "and load the hpwdt driver with priority=1.\n");
}
#else
static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
@@ -714,6 +716,14 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
cmn_regs.u1.rah = 0x0D;
cmn_regs.u1.ral = 0x02;
+ /*
+ * If the priority is set to 1, then we will be put first on the
+ * die notify list to handle a critical NMI. The default is to
+ * be last so other users of the NMI signal can function.
+ */
+ if (priority)
+ die_notifier.priority = 0x7FFFFFFF;
+
retval = register_die_notifier(&die_notifier);
if (retval != 0) {
dev_warn(&dev->dev,
@@ -733,9 +743,11 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
printk(KERN_INFO
"hp Watchdog Timer Driver: %s"
", timer margin: %d seconds (nowayout=%d)"
- ", allow kernel dump: %s (default = 0/OFF).\n",
+ ", allow kernel dump: %s (default = 0/OFF)"
+ ", priority: %s (default = 0/LAST).\n",
HPWDT_VERSION, soft_margin, nowayout,
- (allow_kdump == 0) ? "OFF" : "ON");
+ (allow_kdump == 0) ? "OFF" : "ON",
+ (priority == 0) ? "LAST" : "FIRST");
return 0;
@@ -798,5 +810,9 @@ module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "The hpwdt driver handles NMIs first or last"
+ " (default = 0/Last)");
+
module_init(hpwdt_init);
module_exit(hpwdt_cleanup);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index f2713851aaab..3ed571a2ab18 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -159,6 +159,7 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
file->private_data = (void *) wdev;
omap_wdt_set_timeout(wdev);
+ omap_wdt_ping(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
return nonseekable_open(inode, file);
@@ -313,6 +314,9 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdev);
+ clk_enable(wdev->ick);
+ clk_enable(wdev->fck);
+
omap_wdt_disable(wdev);
omap_wdt_adjust_timeout(timer_margin);
@@ -332,6 +336,9 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev)
/* autogate OCP interface clock */
__raw_writel(0x01, wdev->base + OMAP_WATCHDOG_SYS_CONFIG);
+ clk_disable(wdev->ick);
+ clk_disable(wdev->fck);
+
omap_wdt_dev = pdev;
return 0;
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
new file mode 100644
index 000000000000..538ec2c05197
--- /dev/null
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -0,0 +1,282 @@
+/*
+ * PNX833x Hardware Watchdog Driver
+ * Copyright 2008 NXP Semiconductors
+ * Daniel Laird <daniel.j.laird@nxp.com>
+ * Andre McCurdy <andre.mccurdy@nxp.com>
+ *
+ * Heavily based upon - IndyDog 0.3
+ * A Hardware Watchdog Device for SGI IP22
+ *
+ * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <asm/mach-pnx833x/pnx833x.h>
+
+#define PFX "pnx833x: "
+#define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */
+#define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68MHZ. */
+
+/** CONFIG block */
+#define PNX833X_CONFIG (0x07000U)
+#define PNX833X_CONFIG_CPU_WATCHDOG (0x54)
+#define PNX833X_CONFIG_CPU_WATCHDOG_COMPARE (0x58)
+#define PNX833X_CONFIG_CPU_COUNTERS_CONTROL (0x1c)
+
+/** RESET block */
+#define PNX833X_RESET (0x08000U)
+#define PNX833X_RESET_CONFIG (0x08)
+
+static unsigned long pnx833x_wdt_alive;
+
+/* Default timeout, expressed in ticks of the 68 MHz watchdog counter (30 seconds). */
+static int pnx833x_wdt_timeout = (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY);
+module_param(pnx833x_wdt_timeout, int, 0);
+MODULE_PARM_DESC(pnx833x_wdt_timeout,
+ "Watchdog timeout in 68 MHz clock ticks, default="
+ __MODULE_STRING(pnx833x_wdt_timeout) " (30 seconds).");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int start_enabled = 1;
+module_param(start_enabled, int, 0);
+MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion "
+ "(default=" __MODULE_STRING(start_enabled) ")");
+
+static void pnx833x_wdt_start(void)
+{
+ /* Enable watchdog causing reset. */
+ PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) |= 0x1;
+ /* Set timeout.*/
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
+ /* Enable watchdog. */
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_COUNTERS_CONTROL) |= 0x1;
+
+ printk(KERN_INFO PFX "Started watchdog timer.\n");
+}
+
+static void pnx833x_wdt_stop(void)
+{
+ /* Disable watchdog causing reset. */
+ PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) &= 0xFFFFFFFE;
+ /* Disable watchdog.*/
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_COUNTERS_CONTROL) &= 0xFFFFFFFE;
+
+ printk(KERN_INFO PFX "Stopped watchdog timer.\n");
+}
+
+static void pnx833x_wdt_ping(void)
+{
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
+}
+
+/*
+ * Allow only one person to hold it open
+ */
+static int pnx833x_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &pnx833x_wdt_alive))
+ return -EBUSY;
+
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ /* Activate timer */
+ if (!start_enabled)
+ pnx833x_wdt_start();
+
+ pnx833x_wdt_ping();
+
+ printk(KERN_INFO PFX "Started watchdog timer.\n");
+
+ return nonseekable_open(inode, file);
+}
+
+static int pnx833x_wdt_release(struct inode *inode, struct file *file)
+{
+ /* Shut off the timer.
+ * Lock it in if it's a module and we defined ...NOWAYOUT */
+ if (!nowayout)
+ pnx833x_wdt_stop(); /* Turn the WDT off */
+
+ clear_bit(0, &pnx833x_wdt_alive);
+ return 0;
+}
+
+static ssize_t pnx833x_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+{
+ /* Refresh the timer. */
+ if (len)
+ pnx833x_wdt_ping();
+
+ return len;
+}
+
+static long pnx833x_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int options, new_timeout = 0;
+ uint32_t timeout, timeout_left = 0;
+
+ static struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .firmware_version = 0,
+ .identity = "Hardware Watchdog for PNX833x",
+ };
+
+ switch (cmd) {
+ default:
+ return -ENOTTY;
+
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user((struct watchdog_info *)arg,
+ &ident, sizeof(ident)))
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int *)arg);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(options, (int *)arg))
+ return -EFAULT;
+
+ if (options & WDIOS_DISABLECARD)
+ pnx833x_wdt_stop();
+
+ if (options & WDIOS_ENABLECARD)
+ pnx833x_wdt_start();
+
+ return 0;
+
+ case WDIOC_KEEPALIVE:
+ pnx833x_wdt_ping();
+ return 0;
+
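+ /* Note: timeout values below are raw 68 MHz counter ticks, not seconds */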
+ case WDIOC_SETTIMEOUT:
+ {
+ if (get_user(new_timeout, (int *)arg))
+ return -EFAULT;
+
+ pnx833x_wdt_timeout = new_timeout;
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = new_timeout;
+ return put_user(new_timeout, (int *)arg);
+ }
+
+ case WDIOC_GETTIMEOUT:
+ timeout = PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE);
+ return put_user(timeout, (int *)arg);
+
+ case WDIOC_GETTIMELEFT:
+ timeout_left = PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG);
+ return put_user(timeout_left, (int *)arg);
+
+ }
+}
+
+static int pnx833x_wdt_notify_sys(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ pnx833x_wdt_stop(); /* Turn the WDT off */
+
+ return NOTIFY_DONE;
+}
+
+static const struct file_operations pnx833x_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = pnx833x_wdt_write,
+ .unlocked_ioctl = pnx833x_wdt_ioctl,
+ .open = pnx833x_wdt_open,
+ .release = pnx833x_wdt_release,
+};
+
+static struct miscdevice pnx833x_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &pnx833x_wdt_fops,
+};
+
+static struct notifier_block pnx833x_wdt_notifier = {
+ .notifier_call = pnx833x_wdt_notify_sys,
+};
+
+static char banner[] __initdata =
+ KERN_INFO PFX "Hardware Watchdog Timer for PNX833x: Version 0.1\n";
+
+static int __init watchdog_init(void)
+{
+ int ret, cause;
+
+ /* Let's check the reason for the reset. */
+ cause = PNX833X_REG(PNX833X_RESET);
+ /* If bit 31 is set, the watchdog was the cause of the reset. */
+ if (cause & 0x80000000) {
+ printk(KERN_INFO PFX "The system was previously reset due to "
+ "the watchdog firing - please investigate...\n");
+ }
+
+ ret = register_reboot_notifier(&pnx833x_wdt_notifier);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register reboot notifier (err=%d)\n", ret);
+ return ret;
+ }
+
+ ret = misc_register(&pnx833x_wdt_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (err=%d)\n",
+ WATCHDOG_MINOR, ret);
+ unregister_reboot_notifier(&pnx833x_wdt_notifier);
+ return ret;
+ }
+
+ printk(banner);
+ if (start_enabled)
+ pnx833x_wdt_start();
+
+ return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+ misc_deregister(&pnx833x_wdt_miscdev);
+ unregister_reboot_notifier(&pnx833x_wdt_notifier);
+}
+
+module_init(watchdog_init);
+module_exit(watchdog_exit);
+
+MODULE_AUTHOR("Daniel Laird/Andre McCurdy");
+MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index ee1caae4d33b..016245419fad 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -38,7 +38,7 @@
static unsigned long oscr_freq;
static unsigned long sa1100wdt_users;
-static int pre_margin;
+static unsigned int pre_margin;
static int boot_status;
/*
@@ -84,6 +84,7 @@ static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT
| WDIOF_KEEPALIVEPING,
.identity = "SA1100/PXA255 Watchdog",
+ .firmware_version = 1,
};
static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
@@ -118,7 +119,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
if (ret)
break;
- if (time <= 0 || time > 255) {
+ if (time <= 0 || (oscr_freq * (long long)time >= 0xffffffff)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
new file mode 100644
index 000000000000..5dd952681f32
--- /dev/null
+++ b/drivers/watchdog/stmp3xxx_wdt.c
@@ -0,0 +1,296 @@
+/*
+ * Watchdog driver for Freescale STMP37XX/STMP378X
+ *
+ * Author: Vitaly Wool <vital@embeddedalley.com>
+ *
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <mach/platform.h>
+#include <mach/regs-rtc.h>
+
+#define DEFAULT_HEARTBEAT 19
+#define MAX_HEARTBEAT (0x10000000 >> 6)
+
+/* missing bitmask in headers */
+#define BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER 0x80000000
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+
+#define WDOG_COUNTER_RATE 1000 /* 1 kHz clock */
+
+static DEFINE_SPINLOCK(stmp3xxx_wdt_io_lock);
+static unsigned long wdt_status;
+static const int nowayout = WATCHDOG_NOWAYOUT;
+static int heartbeat = DEFAULT_HEARTBEAT;
+static unsigned long boot_status;
+
+static void wdt_enable(u32 value)
+{
+ spin_lock(&stmp3xxx_wdt_io_lock);
+ __raw_writel(value, REGS_RTC_BASE + HW_RTC_WATCHDOG);
+ stmp3xxx_setl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
+ stmp3xxx_setl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
+ REGS_RTC_BASE + HW_RTC_PERSISTENT1);
+ spin_unlock(&stmp3xxx_wdt_io_lock);
+}
+
+static void wdt_disable(void)
+{
+ spin_lock(&stmp3xxx_wdt_io_lock);
+ stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
+ REGS_RTC_BASE + HW_RTC_PERSISTENT1);
+ stmp3xxx_clearl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
+ spin_unlock(&stmp3xxx_wdt_io_lock);
+}
+
+static void wdt_ping(void)
+{
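+ /* Reload the watchdog: heartbeat seconds times the 1 kHz counter rate */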
+ wdt_enable(heartbeat * WDOG_COUNTER_RATE);
+}
+
+static int stmp3xxx_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+ return -EBUSY;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ wdt_ping();
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t stmp3xxx_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ }
+ }
+ wdt_ping();
+ }
+
+ return len;
+}
+
+static struct watchdog_info ident = {
+ .options = WDIOF_CARDRESET |
+ WDIOF_MAGICCLOSE |
+ WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING,
+ .identity = "STMP3XXX Watchdog",
+};
+
+static long stmp3xxx_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_heartbeat, opts;
+ int ret = -ENOTTY;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = put_user(0, p);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(boot_status, p);
+ break;
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(opts, p)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (opts & WDIOS_DISABLECARD)
+ wdt_disable();
+ else if (opts & WDIOS_ENABLECARD)
+ wdt_ping();
+ else {
+ pr_debug("%s: unknown option 0x%x\n", __func__, opts);
+ ret = -EINVAL;
+ break;
+ }
+ ret = 0;
+ break;
+
+ case WDIOC_KEEPALIVE:
+ wdt_ping();
+ ret = 0;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_heartbeat, p)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (new_heartbeat <= 0 || new_heartbeat > MAX_HEARTBEAT) {
+ ret = -EINVAL;
+ break;
+ }
+
+ heartbeat = new_heartbeat;
+ wdt_ping();
+ /* Fall through */
+
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(heartbeat, p);
+ break;
+ }
+ return ret;
+}
+
+static int stmp3xxx_wdt_release(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ if (!nowayout) {
+ if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
+ wdt_ping();
+ pr_debug("%s: Device closed unexpectedly\n", __func__);
+ ret = -EINVAL;
+ } else {
+ wdt_disable();
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ }
+ }
+ clear_bit(WDT_IN_USE, &wdt_status);
+
+ return ret;
+}
+
+static const struct file_operations stmp3xxx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = stmp3xxx_wdt_write,
+ .unlocked_ioctl = stmp3xxx_wdt_ioctl,
+ .open = stmp3xxx_wdt_open,
+ .release = stmp3xxx_wdt_release,
+};
+
+static struct miscdevice stmp3xxx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &stmp3xxx_wdt_fops,
+};
+
+static int __devinit stmp3xxx_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
+ heartbeat = DEFAULT_HEARTBEAT;
+
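+ /* Record whether the watchdog enable bit survived the last reset; reported via WDIOC_GETBOOTSTATUS */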
+ boot_status = __raw_readl(REGS_RTC_BASE + HW_RTC_PERSISTENT1) &
+ BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER;
+ boot_status = !!boot_status;
+ stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
+ REGS_RTC_BASE + HW_RTC_PERSISTENT1);
+ wdt_disable(); /* disable for now */
+
+ ret = misc_register(&stmp3xxx_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register misc device\n");
+ return ret;
+ }
+
+ printk(KERN_INFO "stmp3xxx watchdog: initialized, heartbeat %d sec\n",
+ heartbeat);
+
+ return ret;
+}
+
+static int __devexit stmp3xxx_wdt_remove(struct platform_device *pdev)
+{
+ misc_deregister(&stmp3xxx_wdt_miscdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wdt_suspended;
+static u32 wdt_saved_time;
+
+static int stmp3xxx_wdt_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
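+ /* If the watchdog is running, remember its reload value and stop it across suspend */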
+ if (__raw_readl(REGS_RTC_BASE + HW_RTC_CTRL) &
+ BM_RTC_CTRL_WATCHDOGEN) {
+ wdt_suspended = 1;
+ wdt_saved_time = __raw_readl(REGS_RTC_BASE + HW_RTC_WATCHDOG);
+ wdt_disable();
+ }
+ return 0;
+}
+
+static int stmp3xxx_wdt_resume(struct platform_device *pdev)
+{
+ if (wdt_suspended) {
+ wdt_enable(wdt_saved_time);
+ wdt_suspended = 0;
+ }
+ return 0;
+}
+#else
+#define stmp3xxx_wdt_suspend NULL
+#define stmp3xxx_wdt_resume NULL
+#endif
+
+static struct platform_driver platform_wdt_driver = {
+ .driver = {
+ .name = "stmp3xxx_wdt",
+ },
+ .probe = stmp3xxx_wdt_probe,
+ .remove = __devexit_p(stmp3xxx_wdt_remove),
+ .suspend = stmp3xxx_wdt_suspend,
+ .resume = stmp3xxx_wdt_resume,
+};
+
+static int __init stmp3xxx_wdt_init(void)
+{
+ return platform_driver_register(&platform_wdt_driver);
+}
+
+static void __exit stmp3xxx_wdt_exit(void)
+{
+ platform_driver_unregister(&platform_wdt_driver);
+}
+
+module_init(stmp3xxx_wdt_init);
+module_exit(stmp3xxx_wdt_exit);
+
+MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
+MODULE_LICENSE("GPL");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
new file mode 100644
index 000000000000..cb46556f2973
--- /dev/null
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) Nokia Corporation
+ *
+ * Written by Timo Kokkonen <timo.t.kokkonen at nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/i2c/twl4030.h>
+
+#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3
+
+#define TWL4030_WDT_STATE_OPEN 0x1
+#define TWL4030_WDT_STATE_ACTIVE 0x8
+
+static struct platform_device *twl4030_wdt_dev;
+
+struct twl4030_wdt {
+ struct miscdevice miscdev;
+ int timer_margin;
+ unsigned long state;
+};
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int twl4030_wdt_write(unsigned char val)
+{
+ return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
+ TWL4030_WATCHDOG_CFG_REG_OFFS);
+}
+
+static int twl4030_wdt_enable(struct twl4030_wdt *wdt)
+{
+ return twl4030_wdt_write(wdt->timer_margin + 1);
+}
+
+static int twl4030_wdt_disable(struct twl4030_wdt *wdt)
+{
+ return twl4030_wdt_write(0);
+}
+
+static int twl4030_wdt_set_timeout(struct twl4030_wdt *wdt, int timeout)
+{
+ if (timeout < 0 || timeout > 30) {
+ dev_warn(wdt->miscdev.parent,
+ "Timeout can only be in the range [0-30] seconds\n");
+ return -EINVAL;
+ }
+ wdt->timer_margin = timeout;
+ return twl4030_wdt_enable(wdt);
+}
+
+static ssize_t twl4030_wdt_write_fop(struct file *file,
+ const char __user *data, size_t len, loff_t *ppos)
+{
+ struct twl4030_wdt *wdt = file->private_data;
+
+ if (len)
+ twl4030_wdt_enable(wdt);
+
+ return len;
+}
+
+static long twl4030_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_margin;
+ struct twl4030_wdt *wdt = file->private_data;
+
+ static const struct watchdog_info twl4030_wd_ident = {
+ .identity = "TWL4030 Watchdog",
+ .options = WDIOF_SETTIMEOUT,
+ .firmware_version = 0,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &twl4030_wd_ident,
+ sizeof(twl4030_wd_ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_KEEPALIVE:
+ twl4030_wdt_enable(wdt);
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, p))
+ return -EFAULT;
+ if (twl4030_wdt_set_timeout(wdt, new_margin))
+ return -EINVAL;
+ return put_user(wdt->timer_margin, p);
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt->timer_margin, p);
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static int twl4030_wdt_open(struct inode *inode, struct file *file)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(twl4030_wdt_dev);
+
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &wdt->state))
+ return -EBUSY;
+
+ wdt->state |= TWL4030_WDT_STATE_ACTIVE;
+ file->private_data = (void *) wdt;
+
+ twl4030_wdt_enable(wdt);
+ return nonseekable_open(inode, file);
+}
+
+static int twl4030_wdt_release(struct inode *inode, struct file *file)
+{
+ struct twl4030_wdt *wdt = file->private_data;
+ if (nowayout) {
+ dev_alert(wdt->miscdev.parent,
+ "Unexpected close, watchdog still running!\n");
+ twl4030_wdt_enable(wdt);
+ } else {
+ if (twl4030_wdt_disable(wdt))
+ return -EFAULT;
+ wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
+ }
+
+ clear_bit(0, &wdt->state);
+ return 0;
+}
+
+static const struct file_operations twl4030_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = twl4030_wdt_open,
+ .release = twl4030_wdt_release,
+ .unlocked_ioctl = twl4030_wdt_ioctl,
+ .write = twl4030_wdt_write_fop,
+};
+
+static int __devinit twl4030_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct twl4030_wdt *wdt;
+
+ wdt = kzalloc(sizeof(struct twl4030_wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->state = 0;
+ wdt->timer_margin = 30;
+ wdt->miscdev.parent = &pdev->dev;
+ wdt->miscdev.fops = &twl4030_wdt_fops;
+ wdt->miscdev.minor = WATCHDOG_MINOR;
+ wdt->miscdev.name = "watchdog";
+
+ platform_set_drvdata(pdev, wdt);
+
+ twl4030_wdt_dev = pdev;
+
+ ret = misc_register(&wdt->miscdev);
+ if (ret) {
+ dev_err(wdt->miscdev.parent,
+ "Failed to register misc device\n");
+ platform_set_drvdata(pdev, NULL);
+ kfree(wdt);
+ twl4030_wdt_dev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static int __devexit twl4030_wdt_remove(struct platform_device *pdev)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
+ if (twl4030_wdt_disable(wdt))
+ return -EFAULT;
+
+ wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
+ misc_deregister(&wdt->miscdev);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(wdt);
+ twl4030_wdt_dev = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int twl4030_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
+ if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
+ return twl4030_wdt_disable(wdt);
+
+ return 0;
+}
+
+static int twl4030_wdt_resume(struct platform_device *pdev)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
+ if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
+ return twl4030_wdt_enable(wdt);
+
+ return 0;
+}
+#else
+#define twl4030_wdt_suspend NULL
+#define twl4030_wdt_resume NULL
+#endif
+
+static struct platform_driver twl4030_wdt_driver = {
+ .probe = twl4030_wdt_probe,
+ .remove = __devexit_p(twl4030_wdt_remove),
+ .suspend = twl4030_wdt_suspend,
+ .resume = twl4030_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl4030_wdt",
+ },
+};
+
+static int __init twl4030_wdt_init(void)
+{
+ return platform_driver_register(&twl4030_wdt_driver);
+}
+module_init(twl4030_wdt_init);
+
+static void __exit twl4030_wdt_exit(void)
+{
+ platform_driver_unregister(&twl4030_wdt_driver);
+}
+module_exit(twl4030_wdt_exit);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:twl4030_wdt");
+
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 916890abffdd..f201accc4e3d 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -89,6 +89,11 @@ static void w83627hf_select_wd_register(void)
c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
outb_p(0x2b, WDT_EFER);
outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */
+ } else if (c == 0x88) { /* W83627EHF */
+ outb_p(0x2d, WDT_EFER); /* select GPIO5 */
+ c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
+ outb_p(0x2d, WDT_EFER);
+ outb_p(c, WDT_EFDR); /* set GPIO5 to WDT0 */
}
outb_p(0x07, WDT_EFER); /* point to logical device number reg */
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index 883b5f79673a..a6c12dec91a1 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -149,8 +149,10 @@ static void wdt_ctrl(int timeout)
{
spin_lock(&io_lock);
- if (w83697ug_select_wd_register() < 0)
+ if (w83697ug_select_wd_register() < 0) {
+ spin_unlock(&io_lock);
return;
+ }
outb_p(0xF4, WDT_EFER); /* Select CRF4 */
outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF4 */
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index a4fe7a38d9b0..3bde56bce63a 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -218,16 +218,14 @@ static void wdrtas_timer_keepalive(void)
*/
static int wdrtas_get_temperature(void)
{
- long result;
+ int result;
int temperature = 0;
- result = rtas_call(wdrtas_token_get_sensor_state, 2, 2,
- (void *)__pa(&temperature),
- WDRTAS_THERMAL_SENSOR, 0);
+ result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);
if (result < 0)
printk(KERN_WARNING "wdrtas: reading the thermal sensor "
- "faild: %li\n", result);
+ "failed: %i\n", result);
else
temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index c45839a4a34d..7a1bdc7c95a9 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -2,7 +2,7 @@
* Industrial Computer Source PCI-WDT500/501 driver
*
* (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
- * All Rights Reserved.
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -99,14 +99,16 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#ifdef CONFIG_WDT_501_PCI
/* Support for the Fan Tachometer on the PCI-WDT501 */
static int tachometer;
-
module_param(tachometer, int, 0);
MODULE_PARM_DESC(tachometer,
- "PCI-WDT501 Fan Tachometer support (0=disable, default=0)");
-#endif /* CONFIG_WDT_501_PCI */
+ "PCI-WDT501 Fan Tachometer support (0=disable, default=0)");
+
+static int type = 500;
+module_param(type, int, 0);
+MODULE_PARM_DESC(type,
+ "PCI-WDT501 Card type (500 or 501, default=500)");
/*
* Programming support
@@ -266,22 +268,21 @@ static int wdtpci_get_status(int *status)
*status |= WDIOF_EXTERN1;
if (new_status & WDC_SR_ISII1)
*status |= WDIOF_EXTERN2;
-#ifdef CONFIG_WDT_501_PCI
- if (!(new_status & WDC_SR_TGOOD))
- *status |= WDIOF_OVERHEAT;
- if (!(new_status & WDC_SR_PSUOVER))
- *status |= WDIOF_POWEROVER;
- if (!(new_status & WDC_SR_PSUUNDR))
- *status |= WDIOF_POWERUNDER;
- if (tachometer) {
- if (!(new_status & WDC_SR_FANGOOD))
- *status |= WDIOF_FANFAULT;
+ if (type == 501) {
+ if (!(new_status & WDC_SR_TGOOD))
+ *status |= WDIOF_OVERHEAT;
+ if (!(new_status & WDC_SR_PSUOVER))
+ *status |= WDIOF_POWEROVER;
+ if (!(new_status & WDC_SR_PSUUNDR))
+ *status |= WDIOF_POWERUNDER;
+ if (tachometer) {
+ if (!(new_status & WDC_SR_FANGOOD))
+ *status |= WDIOF_FANFAULT;
+ }
}
-#endif /* CONFIG_WDT_501_PCI */
return 0;
}
-#ifdef CONFIG_WDT_501_PCI
/**
* wdtpci_get_temperature:
*
@@ -300,7 +301,6 @@ static int wdtpci_get_temperature(int *temperature)
*temperature = (c * 11 / 15) + 7;
return 0;
}
-#endif /* CONFIG_WDT_501_PCI */
/**
* wdtpci_interrupt:
@@ -327,22 +327,22 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
printk(KERN_CRIT PFX "status %d\n", status);
-#ifdef CONFIG_WDT_501_PCI
- if (!(status & WDC_SR_TGOOD)) {
- u8 alarm = inb(WDT_RT);
- printk(KERN_CRIT PFX "Overheat alarm.(%d)\n", alarm);
- udelay(8);
- }
- if (!(status & WDC_SR_PSUOVER))
- printk(KERN_CRIT PFX "PSU over voltage.\n");
- if (!(status & WDC_SR_PSUUNDR))
- printk(KERN_CRIT PFX "PSU under voltage.\n");
- if (tachometer) {
- if (!(status & WDC_SR_FANGOOD))
- printk(KERN_CRIT PFX "Possible fan fault.\n");
+ if (type == 501) {
+ if (!(status & WDC_SR_TGOOD)) {
+ printk(KERN_CRIT PFX "Overheat alarm.(%d)\n",
+ inb(WDT_RT));
+ udelay(8);
+ }
+ if (!(status & WDC_SR_PSUOVER))
+ printk(KERN_CRIT PFX "PSU over voltage.\n");
+ if (!(status & WDC_SR_PSUUNDR))
+ printk(KERN_CRIT PFX "PSU under voltage.\n");
+ if (tachometer) {
+ if (!(status & WDC_SR_FANGOOD))
+ printk(KERN_CRIT PFX "Possible fan fault.\n");
+ }
}
-#endif /* CONFIG_WDT_501_PCI */
- if (!(status&WDC_SR_WCCR)) {
+ if (!(status & WDC_SR_WCCR)) {
#ifdef SOFTWARE_REBOOT
#ifdef ONLY_TESTING
printk(KERN_CRIT PFX "Would Reboot.\n");
@@ -371,12 +371,13 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
*/
static ssize_t wdtpci_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
if (count) {
if (!nowayout) {
size_t i;
+ /* In case it was set long ago */
expect_close = 0;
for (i = 0; i != count; i++) {
@@ -406,10 +407,10 @@ static ssize_t wdtpci_write(struct file *file, const char __user *buf,
static long wdtpci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- int new_heartbeat;
- int status;
void __user *argp = (void __user *)arg;
int __user *p = argp;
+ int new_heartbeat;
+ int status;
static struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT|
@@ -421,11 +422,12 @@ static long wdtpci_ioctl(struct file *file, unsigned int cmd,
/* Add options according to the card we have */
ident.options |= (WDIOF_EXTERN1|WDIOF_EXTERN2);
-#ifdef CONFIG_WDT_501_PCI
- ident.options |= (WDIOF_OVERHEAT|WDIOF_POWERUNDER|WDIOF_POWEROVER);
- if (tachometer)
- ident.options |= WDIOF_FANFAULT;
-#endif /* CONFIG_WDT_501_PCI */
+ if (type == 501) {
+ ident.options |= (WDIOF_OVERHEAT|WDIOF_POWERUNDER|
+ WDIOF_POWEROVER);
+ if (tachometer)
+ ident.options |= WDIOF_FANFAULT;
+ }
switch (cmd) {
case WDIOC_GETSUPPORT:
@@ -503,7 +505,6 @@ static int wdtpci_release(struct inode *inode, struct file *file)
return 0;
}
-#ifdef CONFIG_WDT_501_PCI
/**
* wdtpci_temp_read:
* @file: file handle to the watchdog board
@@ -554,7 +555,6 @@ static int wdtpci_temp_release(struct inode *inode, struct file *file)
{
return 0;
}
-#endif /* CONFIG_WDT_501_PCI */
/**
* notify_sys:
@@ -596,7 +596,6 @@ static struct miscdevice wdtpci_miscdev = {
.fops = &wdtpci_fops,
};
-#ifdef CONFIG_WDT_501_PCI
static const struct file_operations wdtpci_temp_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -610,7 +609,6 @@ static struct miscdevice temp_miscdev = {
.name = "temperature",
.fops = &wdtpci_temp_fops,
};
-#endif /* CONFIG_WDT_501_PCI */
/*
* The WDT card needs to learn about soft shutdowns in order to
@@ -633,6 +631,11 @@ static int __devinit wdtpci_init_one(struct pci_dev *dev,
return -ENODEV;
}
+ if (type != 500 && type != 501) {
+ printk(KERN_ERR PFX "unknown card type '%d'.\n", type);
+ return -ENODEV;
+ }
+
if (pci_enable_device(dev)) {
printk(KERN_ERR PFX "Not possible to enable PCI Device\n");
return -ENODEV;
@@ -678,15 +681,15 @@ static int __devinit wdtpci_init_one(struct pci_dev *dev,
goto out_irq;
}
-#ifdef CONFIG_WDT_501_PCI
- ret = misc_register(&temp_miscdev);
- if (ret) {
- printk(KERN_ERR PFX
+ if (type == 501) {
+ ret = misc_register(&temp_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
"cannot register miscdev on minor=%d (err=%d)\n",
- TEMP_MINOR, ret);
- goto out_rbt;
+ TEMP_MINOR, ret);
+ goto out_rbt;
+ }
}
-#endif /* CONFIG_WDT_501_PCI */
ret = misc_register(&wdtpci_miscdev);
if (ret) {
@@ -698,20 +701,18 @@ static int __devinit wdtpci_init_one(struct pci_dev *dev,
printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
-#ifdef CONFIG_WDT_501_PCI
- printk(KERN_INFO "wdt: Fan Tachometer is %s\n",
+ if (type == 501)
+ printk(KERN_INFO "wdt: Fan Tachometer is %s\n",
(tachometer ? "Enabled" : "Disabled"));
-#endif /* CONFIG_WDT_501_PCI */
ret = 0;
out:
return ret;
out_misc:
-#ifdef CONFIG_WDT_501_PCI
- misc_deregister(&temp_miscdev);
+ if (type == 501)
+ misc_deregister(&temp_miscdev);
out_rbt:
-#endif /* CONFIG_WDT_501_PCI */
unregister_reboot_notifier(&wdtpci_notifier);
out_irq:
free_irq(irq, &wdtpci_miscdev);
@@ -728,9 +729,8 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev)
/* here we assume only one device will ever have
* been picked up and registered by probe function */
misc_deregister(&wdtpci_miscdev);
-#ifdef CONFIG_WDT_501_PCI
- misc_deregister(&temp_miscdev);
-#endif /* CONFIG_WDT_501_PCI */
+ if (type == 501)
+ misc_deregister(&temp_miscdev);
unregister_reboot_notifier(&wdtpci_notifier);
free_irq(irq, &wdtpci_miscdev);
release_region(io, 16);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 891d2e90753a..abad71b1632b 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -927,9 +927,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
void __init xen_init_IRQ(void)
{
int i;
- size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
- cpu_evtchn_mask_p = alloc_bootmem(size);
+ cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
+ GFP_KERNEL);
BUG_ON(cpu_evtchn_mask_p == NULL);
init_evtchn_cpu_bindings();